From beb4032e2b8c62e65fabcd8ef7cc4cf3d90535a3 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Wed, 5 May 2021 14:29:16 -0700 Subject: [PATCH] feat: add services to aiplatform_v1beta1 (#367) * feat: add services to aiplatform_v1beta1 feat: add featurestore, index, metadata, monitoring, pipeline, and tensorboard services to aiplatform_v1beta1 * add empty testing/constraints, lower coverage fail-under --- .coveragerc | 6 +- .github/header-checker-lint.yml | 2 +- .gitignore | 1 - .kokoro/release.sh | 4 +- .kokoro/release/common.cfg | 14 +- .pre-commit-config.yaml | 16 +- CONTRIBUTING.rst | 16 +- docs/_static/custom.css | 13 +- .../featurestore_online_serving_service.rst | 6 + .../featurestore_service.rst | 11 + .../index_endpoint_service.rst | 11 + docs/aiplatform_v1beta1/index_service.rst | 11 + docs/aiplatform_v1beta1/metadata_service.rst | 11 + docs/aiplatform_v1beta1/services.rst | 6 + .../tensorboard_service.rst | 11 + docs/conf.py | 13 + .../services/dataset_service/async_client.py | 52 +- .../services/dataset_service/client.py | 48 +- .../dataset_service/transports/base.py | 38 +- .../dataset_service/transports/grpc.py | 106 +- .../transports/grpc_asyncio.py | 111 +- .../services/endpoint_service/async_client.py | 46 +- .../services/endpoint_service/client.py | 48 +- .../endpoint_service/transports/base.py | 32 +- .../endpoint_service/transports/grpc.py | 106 +- .../transports/grpc_asyncio.py | 111 +- .../services/job_service/async_client.py | 106 +- .../services/job_service/client.py | 82 +- .../services/job_service/transports/base.py | 58 +- .../services/job_service/transports/grpc.py | 126 +- .../job_service/transports/grpc_asyncio.py | 131 +- .../migration_service/async_client.py | 8 +- .../services/migration_service/client.py | 30 +- .../migration_service/transports/base.py | 18 +- .../migration_service/transports/grpc.py | 106 +- .../transports/grpc_asyncio.py | 111 +- .../services/model_service/async_client.py | 52 +- .../services/model_service/client.py 
| 44 +- .../services/model_service/transports/base.py | 40 +- .../services/model_service/transports/grpc.py | 106 +- .../model_service/transports/grpc_asyncio.py | 111 +- .../services/pipeline_service/async_client.py | 36 +- .../services/pipeline_service/client.py | 30 +- .../pipeline_service/transports/base.py | 28 +- .../pipeline_service/transports/grpc.py | 114 +- .../transports/grpc_asyncio.py | 119 +- .../prediction_service/async_client.py | 10 +- .../services/prediction_service/client.py | 8 +- .../prediction_service/transports/base.py | 20 +- .../prediction_service/transports/grpc.py | 104 +- .../transports/grpc_asyncio.py | 109 +- .../specialist_pool_service/async_client.py | 24 +- .../specialist_pool_service/client.py | 26 +- .../transports/base.py | 28 +- .../transports/grpc.py | 106 +- .../transports/grpc_asyncio.py | 111 +- .../cloud/aiplatform_v1/types/annotation.py | 6 +- .../types/batch_prediction_job.py | 40 +- .../cloud/aiplatform_v1/types/custom_job.py | 2 +- .../aiplatform_v1/types/data_labeling_job.py | 2 +- google/cloud/aiplatform_v1/types/dataset.py | 4 +- .../aiplatform_v1/types/dataset_service.py | 36 +- google/cloud/aiplatform_v1/types/endpoint.py | 4 +- .../aiplatform_v1/types/endpoint_service.py | 42 +- .../cloud/aiplatform_v1/types/job_service.py | 60 +- .../aiplatform_v1/types/machine_resources.py | 18 +- .../aiplatform_v1/types/migration_service.py | 12 +- google/cloud/aiplatform_v1/types/model.py | 98 +- .../aiplatform_v1/types/model_evaluation.py | 8 +- .../types/model_evaluation_slice.py | 8 +- .../aiplatform_v1/types/model_service.py | 54 +- .../aiplatform_v1/types/pipeline_service.py | 18 +- .../aiplatform_v1/types/prediction_service.py | 10 +- .../types/specialist_pool_service.py | 20 +- .../aiplatform_v1/types/training_pipeline.py | 40 +- google/cloud/aiplatform_v1beta1/__init__.py | 432 + .../services/dataset_service/async_client.py | 32 +- .../services/dataset_service/client.py | 48 +- .../dataset_service/transports/base.py | 
18 +- .../dataset_service/transports/grpc.py | 106 +- .../transports/grpc_asyncio.py | 111 +- .../services/endpoint_service/async_client.py | 32 +- .../services/endpoint_service/client.py | 48 +- .../endpoint_service/transports/base.py | 18 +- .../endpoint_service/transports/grpc.py | 106 +- .../transports/grpc_asyncio.py | 111 +- .../__init__.py | 24 + .../async_client.py | 365 + .../client.py | 545 ++ .../transports/__init__.py | 39 + .../transports/base.py | 144 + .../transports/grpc.py | 292 + .../transports/grpc_asyncio.py | 296 + .../services/featurestore_service/__init__.py | 24 + .../featurestore_service/async_client.py | 2048 +++++ .../services/featurestore_service/client.py | 2279 +++++ .../services/featurestore_service/pagers.py | 550 ++ .../transports/__init__.py | 37 + .../featurestore_service/transports/base.py | 388 + .../featurestore_service/transports/grpc.py | 830 ++ .../transports/grpc_asyncio.py | 857 ++ .../index_endpoint_service/__init__.py | 24 + .../index_endpoint_service/async_client.py | 829 ++ .../services/index_endpoint_service/client.py | 1023 +++ .../services/index_endpoint_service/pagers.py | 162 + .../transports/__init__.py | 37 + .../index_endpoint_service/transports/base.py | 219 + .../index_endpoint_service/transports/grpc.py | 442 + .../transports/grpc_asyncio.py | 455 + .../services/index_service/__init__.py | 24 + .../services/index_service/async_client.py | 640 ++ .../services/index_service/client.py | 833 ++ .../services/index_service/pagers.py | 158 + .../index_service/transports/__init__.py | 35 + .../services/index_service/transports/base.py | 183 + .../services/index_service/transports/grpc.py | 376 + .../index_service/transports/grpc_asyncio.py | 384 + .../services/job_service/async_client.py | 781 +- .../services/job_service/client.py | 896 +- .../services/job_service/pagers.py | 291 + .../services/job_service/transports/base.py | 152 +- .../services/job_service/transports/grpc.py | 385 +- 
.../job_service/transports/grpc_asyncio.py | 396 +- .../services/metadata_service/__init__.py | 24 + .../services/metadata_service/async_client.py | 2479 +++++ .../services/metadata_service/client.py | 2762 ++++++ .../services/metadata_service/pagers.py | 676 ++ .../metadata_service/transports/__init__.py | 35 + .../metadata_service/transports/base.py | 502 + .../metadata_service/transports/grpc.py | 993 ++ .../transports/grpc_asyncio.py | 1023 +++ .../migration_service/async_client.py | 8 +- .../services/migration_service/client.py | 30 +- .../migration_service/transports/base.py | 18 +- .../migration_service/transports/grpc.py | 106 +- .../transports/grpc_asyncio.py | 111 +- .../services/model_service/async_client.py | 32 +- .../services/model_service/client.py | 44 +- .../services/model_service/transports/base.py | 18 +- .../services/model_service/transports/grpc.py | 106 +- .../model_service/transports/grpc_asyncio.py | 111 +- .../services/pipeline_service/async_client.py | 468 +- .../services/pipeline_service/client.py | 573 +- .../services/pipeline_service/pagers.py | 129 + .../pipeline_service/transports/base.py | 87 +- .../pipeline_service/transports/grpc.py | 264 +- .../transports/grpc_asyncio.py | 274 +- .../prediction_service/async_client.py | 26 +- .../services/prediction_service/client.py | 26 +- .../prediction_service/transports/base.py | 18 +- .../prediction_service/transports/grpc.py | 112 +- .../transports/grpc_asyncio.py | 117 +- .../specialist_pool_service/async_client.py | 14 +- .../specialist_pool_service/client.py | 26 +- .../transports/base.py | 18 +- .../transports/grpc.py | 106 +- .../transports/grpc_asyncio.py | 111 +- .../services/tensorboard_service/__init__.py | 24 + .../tensorboard_service/async_client.py | 2346 +++++ .../services/tensorboard_service/client.py | 2647 ++++++ .../services/tensorboard_service/pagers.py | 700 ++ .../transports/__init__.py | 37 + .../tensorboard_service/transports/base.py | 509 ++ 
.../tensorboard_service/transports/grpc.py | 962 ++ .../transports/grpc_asyncio.py | 980 ++ .../services/vizier_service/async_client.py | 44 +- .../services/vizier_service/client.py | 44 +- .../vizier_service/transports/base.py | 18 +- .../vizier_service/transports/grpc.py | 110 +- .../vizier_service/transports/grpc_asyncio.py | 115 +- .../aiplatform_v1beta1/types/__init__.py | 436 + .../aiplatform_v1beta1/types/annotation.py | 6 +- .../aiplatform_v1beta1/types/artifact.py | 123 + .../types/batch_prediction_job.py | 66 +- .../cloud/aiplatform_v1beta1/types/context.py | 108 + .../aiplatform_v1beta1/types/custom_job.py | 10 +- .../types/data_labeling_job.py | 2 +- .../cloud/aiplatform_v1beta1/types/dataset.py | 4 +- .../types/dataset_service.py | 36 +- .../types/deployed_index_ref.py | 43 + .../aiplatform_v1beta1/types/endpoint.py | 18 +- .../types/endpoint_service.py | 42 +- .../aiplatform_v1beta1/types/entity_type.py | 101 + .../cloud/aiplatform_v1beta1/types/event.py | 80 + .../aiplatform_v1beta1/types/execution.py | 119 + .../aiplatform_v1beta1/types/explanation.py | 84 +- .../types/explanation_metadata.py | 22 +- .../cloud/aiplatform_v1beta1/types/feature.py | 132 + .../types/feature_monitoring_stats.py | 107 + .../types/feature_selector.py | 54 + .../aiplatform_v1beta1/types/featurestore.py | 114 + .../types/featurestore_monitoring.py | 73 + .../types/featurestore_online_service.py | 282 + .../types/featurestore_service.py | 1281 +++ .../cloud/aiplatform_v1beta1/types/index.py | 118 + .../types/index_endpoint.py | 260 + .../types/index_endpoint_service.py | 298 + .../aiplatform_v1beta1/types/index_service.py | 301 + google/cloud/aiplatform_v1beta1/types/io.py | 48 + .../aiplatform_v1beta1/types/job_service.py | 363 +- .../types/lineage_subgraph.py | 54 + .../types/machine_resources.py | 26 +- .../types/metadata_schema.py | 81 + .../types/metadata_service.py | 971 ++ .../types/metadata_store.py | 82 + .../types/migration_service.py | 32 +- 
.../cloud/aiplatform_v1beta1/types/model.py | 137 +- .../types/model_deployment_monitoring_job.py | 354 + .../types/model_evaluation.py | 10 +- .../types/model_evaluation_slice.py | 8 +- .../types/model_monitoring.py | 220 + .../aiplatform_v1beta1/types/model_service.py | 54 +- .../aiplatform_v1beta1/types/pipeline_job.py | 353 + .../types/pipeline_service.py | 158 +- .../types/prediction_service.py | 28 +- .../types/specialist_pool_service.py | 20 +- .../cloud/aiplatform_v1beta1/types/study.py | 37 +- .../aiplatform_v1beta1/types/tensorboard.py | 108 + .../types/tensorboard_data.py | 161 + .../types/tensorboard_experiment.py | 95 + .../types/tensorboard_run.py | 74 + .../types/tensorboard_service.py | 892 ++ .../types/tensorboard_time_series.py | 123 + .../types/training_pipeline.py | 40 +- .../cloud/aiplatform_v1beta1/types/types.py | 71 + .../types/user_action_reference.py | 5 +- .../cloud/aiplatform_v1beta1/types/value.py | 45 + .../types/vizier_service.py | 40 +- noxfile.py | 38 +- renovate.json | 5 +- synth.py | 56 - testing/constraints-3.8.txt | 0 testing/constraints-3.9.txt | 0 .../aiplatform_v1/test_dataset_service.py | 4 +- .../aiplatform_v1/test_endpoint_service.py | 4 +- .../gapic/aiplatform_v1/test_job_service.py | 8 +- .../aiplatform_v1/test_migration_service.py | 32 +- .../gapic/aiplatform_v1/test_model_service.py | 8 +- .../aiplatform_v1/test_pipeline_service.py | 4 +- .../test_specialist_pool_service.py | 4 +- .../test_dataset_service.py | 4 +- .../test_endpoint_service.py | 4 +- ...est_featurestore_online_serving_service.py | 1501 +++ .../test_featurestore_service.py | 6319 +++++++++++++ .../test_index_endpoint_service.py | 2904 ++++++ .../aiplatform_v1beta1/test_index_service.py | 2170 +++++ .../aiplatform_v1beta1/test_job_service.py | 2949 +++++- .../test_metadata_service.py | 8144 +++++++++++++++++ .../test_migration_service.py | 32 +- .../aiplatform_v1beta1/test_model_service.py | 8 +- .../test_pipeline_service.py | 1557 +++- 
.../test_specialist_pool_service.py | 4 +- .../test_tensorboard_service.py | 8115 ++++++++++++++++ .../aiplatform_v1beta1/test_vizier_service.py | 65 +- 254 files changed, 80478 insertions(+), 4293 deletions(-) create mode 100644 docs/aiplatform_v1beta1/featurestore_online_serving_service.rst create mode 100644 docs/aiplatform_v1beta1/featurestore_service.rst create mode 100644 docs/aiplatform_v1beta1/index_endpoint_service.rst create mode 100644 docs/aiplatform_v1beta1/index_service.rst create mode 100644 docs/aiplatform_v1beta1/metadata_service.rst create mode 100644 docs/aiplatform_v1beta1/tensorboard_service.rst create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py 
create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py create mode 100644 google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_service/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_service/async_client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_service/client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_service/pagers.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py create mode 100644 google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py create mode 100644 google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py create mode 100644 
google/cloud/aiplatform_v1beta1/services/metadata_service/client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py create mode 100644 google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py create mode 100644 google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py create mode 100644 google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py create mode 100644 google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py create mode 100644 google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py create mode 100644 google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py create mode 100644 google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py create mode 100644 google/cloud/aiplatform_v1beta1/types/artifact.py create mode 100644 google/cloud/aiplatform_v1beta1/types/context.py create mode 100644 google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py create mode 100644 google/cloud/aiplatform_v1beta1/types/entity_type.py create mode 100644 google/cloud/aiplatform_v1beta1/types/event.py create mode 100644 google/cloud/aiplatform_v1beta1/types/execution.py create mode 100644 google/cloud/aiplatform_v1beta1/types/feature.py create mode 100644 google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py create mode 100644 google/cloud/aiplatform_v1beta1/types/feature_selector.py create mode 100644 
google/cloud/aiplatform_v1beta1/types/featurestore.py create mode 100644 google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py create mode 100644 google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py create mode 100644 google/cloud/aiplatform_v1beta1/types/featurestore_service.py create mode 100644 google/cloud/aiplatform_v1beta1/types/index.py create mode 100644 google/cloud/aiplatform_v1beta1/types/index_endpoint.py create mode 100644 google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py create mode 100644 google/cloud/aiplatform_v1beta1/types/index_service.py create mode 100644 google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py create mode 100644 google/cloud/aiplatform_v1beta1/types/metadata_schema.py create mode 100644 google/cloud/aiplatform_v1beta1/types/metadata_service.py create mode 100644 google/cloud/aiplatform_v1beta1/types/metadata_store.py create mode 100644 google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py create mode 100644 google/cloud/aiplatform_v1beta1/types/model_monitoring.py create mode 100644 google/cloud/aiplatform_v1beta1/types/pipeline_job.py create mode 100644 google/cloud/aiplatform_v1beta1/types/tensorboard.py create mode 100644 google/cloud/aiplatform_v1beta1/types/tensorboard_data.py create mode 100644 google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py create mode 100644 google/cloud/aiplatform_v1beta1/types/tensorboard_run.py create mode 100644 google/cloud/aiplatform_v1beta1/types/tensorboard_service.py create mode 100644 google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py create mode 100644 google/cloud/aiplatform_v1beta1/types/types.py create mode 100644 google/cloud/aiplatform_v1beta1/types/value.py create mode 100644 testing/constraints-3.8.txt create mode 100644 testing/constraints-3.9.txt create mode 100644 tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py create mode 100644 
tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py create mode 100644 tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py create mode 100644 tests/unit/gapic/aiplatform_v1beta1/test_index_service.py create mode 100644 tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py create mode 100644 tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py diff --git a/.coveragerc b/.coveragerc index 2719524048..5b3f287a0f 100644 --- a/.coveragerc +++ b/.coveragerc @@ -2,10 +2,10 @@ branch = True [report] -fail_under = 99 +fail_under = 100 show_missing = True omit = - .nox/* + google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py exclude_lines = # Re-enable the standard pragma pragma: NO COVER @@ -15,4 +15,4 @@ exclude_lines = # This is added at the module level as a safeguard for if someone # generates the code and tries to run it without pip installing. This # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound \ No newline at end of file + except pkg_resources.DistributionNotFound diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml index fc281c05bd..6fe78aa798 100644 --- a/.github/header-checker-lint.yml +++ b/.github/header-checker-lint.yml @@ -1,6 +1,6 @@ {"allowedCopyrightHolders": ["Google LLC"], "allowedLicenses": ["Apache-2.0", "MIT", "BSD-3"], - "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt"], + "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt", "**/__init__.py", "samples/**/constraints.txt", "samples/**/constraints-test.txt"], "sourceFileExtensions": [ "ts", "js", diff --git a/.gitignore b/.gitignore index 5555e7de6d..b4243ced74 100644 --- a/.gitignore +++ b/.gitignore @@ -26,7 +26,6 @@ pip-log.txt # Unit test / coverage reports .coverage -.coverage.* .nox .cache .pytest_cache diff --git a/.kokoro/release.sh b/.kokoro/release.sh index ab2a347901..62bdb892ff 100755 --- a/.kokoro/release.sh +++ 
b/.kokoro/release.sh @@ -26,7 +26,7 @@ python3 -m pip install --upgrade twine wheel setuptools export PYTHONUNBUFFERED=1 # Move into the package, build the distribution and upload. -TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google_cloud_pypi_password") +TWINE_PASSWORD=$(cat "${KOKORO_GFILE_DIR}/secret_manager/google-cloud-pypi-token") cd github/python-aiplatform python3 setup.py sdist bdist_wheel -twine upload --username gcloudpypi --password "${TWINE_PASSWORD}" dist/* +twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/* diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg index ff589f8e66..5293e75110 100644 --- a/.kokoro/release/common.cfg +++ b/.kokoro/release/common.cfg @@ -23,18 +23,8 @@ env_vars: { value: "github/python-aiplatform/.kokoro/release.sh" } -# Fetch PyPI password -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "google_cloud_pypi_password" - } - } -} - # Tokens needed to report release status back to GitHub env_vars: { key: "SECRET_MANAGER_KEYS" - value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" -} \ No newline at end of file + value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem,google-cloud-pypi-token" +} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a9024b15d7..1bbd787833 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,3 +1,17 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: @@ -12,6 +26,6 @@ repos: hooks: - id: black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.8.4 + rev: 3.9.1 hooks: - id: flake8 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 66216c172d..f865e3769d 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -160,21 +160,7 @@ Running System Tests auth settings and change some configuration in your project to run all the tests. -- System tests will be run against an actual project and - so you'll need to provide some environment variables to facilitate - authentication to your project: - - - ``GOOGLE_APPLICATION_CREDENTIALS``: The path to a JSON key file; - Such a file can be downloaded directly from the developer's console by clicking - "Generate new JSON key". See private key - `docs `__ - for more details. - -- Once you have downloaded your json keys, set the environment variable - ``GOOGLE_APPLICATION_CREDENTIALS`` to the absolute path of the json file:: - - $ export GOOGLE_APPLICATION_CREDENTIALS="/Users//path/to/app_credentials.json" - +- System tests will be run against an actual project. You should use local credentials from gcloud when possible. See `Best practices for application authentication `__. Some tests require a service account. For those tests see `Authenticating as a service account `__. 
************* Test Coverage diff --git a/docs/_static/custom.css b/docs/_static/custom.css index bcd37bbd3c..b0a295464b 100644 --- a/docs/_static/custom.css +++ b/docs/_static/custom.css @@ -1,9 +1,20 @@ div#python2-eol { border-color: red; border-width: medium; -} +} /* Ensure minimum width for 'Parameters' / 'Returns' column */ dl.field-list > dt { min-width: 100px } + +/* Insert space between methods for readability */ +dl.method { + padding-top: 10px; + padding-bottom: 10px +} + +/* Insert empty space between classes */ +dl.class { + padding-bottom: 50px +} diff --git a/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst b/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst new file mode 100644 index 0000000000..21013eb751 --- /dev/null +++ b/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst @@ -0,0 +1,6 @@ +FeaturestoreOnlineServingService +-------------------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/featurestore_service.rst b/docs/aiplatform_v1beta1/featurestore_service.rst new file mode 100644 index 0000000000..d05deb4c2c --- /dev/null +++ b/docs/aiplatform_v1beta1/featurestore_service.rst @@ -0,0 +1,11 @@ +FeaturestoreService +------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_service + :members: + :inherited-members: + + +.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/index_endpoint_service.rst b/docs/aiplatform_v1beta1/index_endpoint_service.rst new file mode 100644 index 0000000000..2389e5bf64 --- /dev/null +++ b/docs/aiplatform_v1beta1/index_endpoint_service.rst @@ -0,0 +1,11 @@ +IndexEndpointService +-------------------------------------- + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.index_endpoint_service + :members: + :inherited-members: + + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/index_service.rst b/docs/aiplatform_v1beta1/index_service.rst new file mode 100644 index 0000000000..e42ade6eaa --- /dev/null +++ b/docs/aiplatform_v1beta1/index_service.rst @@ -0,0 +1,11 @@ +IndexService +------------------------------ + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_service + :members: + :inherited-members: + + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/metadata_service.rst b/docs/aiplatform_v1beta1/metadata_service.rst new file mode 100644 index 0000000000..c1ebfa9585 --- /dev/null +++ b/docs/aiplatform_v1beta1/metadata_service.rst @@ -0,0 +1,11 @@ +MetadataService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.metadata_service + :members: + :inherited-members: + + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.metadata_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/services.rst b/docs/aiplatform_v1beta1/services.rst index 6e4f84c707..490112c7d9 100644 --- a/docs/aiplatform_v1beta1/services.rst +++ b/docs/aiplatform_v1beta1/services.rst @@ -5,10 +5,16 @@ Services for Google Cloud Aiplatform v1beta1 API dataset_service endpoint_service + featurestore_online_serving_service + featurestore_service + index_endpoint_service + index_service job_service + metadata_service migration_service model_service pipeline_service prediction_service specialist_pool_service + tensorboard_service vizier_service diff --git a/docs/aiplatform_v1beta1/tensorboard_service.rst b/docs/aiplatform_v1beta1/tensorboard_service.rst new file mode 100644 index 0000000000..423efcd796 --- /dev/null +++ b/docs/aiplatform_v1beta1/tensorboard_service.rst @@ -0,0 +1,11 @@ +TensorboardService +------------------------------------ + +.. automodule:: google.cloud.aiplatform_v1beta1.services.tensorboard_service + :members: + :inherited-members: + + +.. automodule:: google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers + :members: + :inherited-members: diff --git a/docs/conf.py b/docs/conf.py index 98e68be241..cd484b1e23 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,4 +1,17 @@ # -*- coding: utf-8 -*- +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# # google-cloud-aiplatform documentation build configuration file # diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py index a07ee32dfd..0faf10bac8 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.dataset_service import pagers from google.cloud.aiplatform_v1.types import annotation @@ -205,7 +205,7 @@ async def create_dataset( Args: request (:class:`google.cloud.aiplatform_v1.types.CreateDatasetRequest`): The request object. Request message for - ``DatasetService.CreateDataset``. + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. parent (:class:`str`): Required. The resource name of the Location to create the Dataset in. Format: @@ -259,7 +259,7 @@ async def create_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -297,7 +297,7 @@ async def get_dataset( Args: request (:class:`google.cloud.aiplatform_v1.types.GetDatasetRequest`): The request object. Request message for - ``DatasetService.GetDataset``. + [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. name (:class:`str`): Required. The name of the Dataset resource. @@ -340,7 +340,7 @@ async def get_dataset( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -371,7 +371,7 @@ async def update_dataset( Args: request (:class:`google.cloud.aiplatform_v1.types.UpdateDatasetRequest`): The request object. Request message for - ``DatasetService.UpdateDataset``. + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`): Required. The Dataset which replaces the resource on the server. @@ -429,7 +429,7 @@ async def update_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -461,7 +461,7 @@ async def list_datasets( Args: request (:class:`google.cloud.aiplatform_v1.types.ListDatasetsRequest`): The request object. Request message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. parent (:class:`str`): Required. The name of the Dataset's parent resource. Format: ``projects/{project}/locations/{location}`` @@ -479,7 +479,7 @@ async def list_datasets( Returns: google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsAsyncPager: Response message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. Iterating over this object will yield results and resolve additional pages automatically. @@ -507,7 +507,7 @@ async def list_datasets( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_datasets, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -543,7 +543,7 @@ async def delete_dataset( Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteDatasetRequest`): The request object. 
Request message for - ``DatasetService.DeleteDataset``. + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. name (:class:`str`): Required. The resource name of the Dataset to delete. Format: @@ -600,7 +600,7 @@ async def delete_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -639,7 +639,7 @@ async def import_data( Args: request (:class:`google.cloud.aiplatform_v1.types.ImportDataRequest`): The request object. Request message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. name (:class:`str`): Required. The name of the Dataset resource. Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` @@ -669,7 +669,7 @@ async def import_data( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.ImportDataResponse` Response message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. """ # Create or coerce a protobuf request object. @@ -697,7 +697,7 @@ async def import_data( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.import_data, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -736,7 +736,7 @@ async def export_data( Args: request (:class:`google.cloud.aiplatform_v1.types.ExportDataRequest`): The request object. Request message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. name (:class:`str`): Required. The name of the Dataset resource. 
Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` @@ -765,7 +765,7 @@ async def export_data( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.ExportDataResponse` Response message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. """ # Create or coerce a protobuf request object. @@ -792,7 +792,7 @@ async def export_data( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.export_data, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -830,7 +830,7 @@ async def list_data_items( Args: request (:class:`google.cloud.aiplatform_v1.types.ListDataItemsRequest`): The request object. Request message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. parent (:class:`str`): Required. The resource name of the Dataset to list DataItems from. Format: @@ -849,7 +849,7 @@ async def list_data_items( Returns: google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsAsyncPager: Response message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. Iterating over this object will yield results and resolve additional pages automatically. @@ -877,7 +877,7 @@ async def list_data_items( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_data_items, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -913,7 +913,7 @@ async def get_annotation_spec( Args: request (:class:`google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest`): The request object. Request message for - ``DatasetService.GetAnnotationSpec``. + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. name (:class:`str`): Required. 
The name of the AnnotationSpec resource. Format: @@ -958,7 +958,7 @@ async def get_annotation_spec( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_annotation_spec, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -988,7 +988,7 @@ async def list_annotations( Args: request (:class:`google.cloud.aiplatform_v1.types.ListAnnotationsRequest`): The request object. Request message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. parent (:class:`str`): Required. The resource name of the DataItem to list Annotations from. Format: @@ -1008,7 +1008,7 @@ async def list_annotations( Returns: google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsAsyncPager: Response message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. Iterating over this object will yield results and resolve additional pages automatically. @@ -1036,7 +1036,7 @@ async def list_annotations( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_annotations, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py index 160a2049b8..e1fcc167f2 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.dataset_service import pagers from google.cloud.aiplatform_v1.types import annotation @@ -426,13 +426,13 @@ def create_dataset( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Creates a Dataset. Args: request (google.cloud.aiplatform_v1.types.CreateDatasetRequest): The request object. Request message for - ``DatasetService.CreateDataset``. + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. parent (str): Required. The resource name of the Location to create the Dataset in. Format: @@ -501,7 +501,7 @@ def create_dataset( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, gca_dataset.Dataset, @@ -525,7 +525,7 @@ def get_dataset( Args: request (google.cloud.aiplatform_v1.types.GetDatasetRequest): The request object. Request message for - ``DatasetService.GetDataset``. 
+ [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. name (str): Required. The name of the Dataset resource. @@ -600,7 +600,7 @@ def update_dataset( Args: request (google.cloud.aiplatform_v1.types.UpdateDatasetRequest): The request object. Request message for - ``DatasetService.UpdateDataset``. + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. dataset (google.cloud.aiplatform_v1.types.Dataset): Required. The Dataset which replaces the resource on the server. @@ -691,7 +691,7 @@ def list_datasets( Args: request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): The request object. Request message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. parent (str): Required. The name of the Dataset's parent resource. Format: ``projects/{project}/locations/{location}`` @@ -709,7 +709,7 @@ def list_datasets( Returns: google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsPager: Response message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. Iterating over this object will yield results and resolve additional pages automatically. @@ -768,13 +768,13 @@ def delete_dataset( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a Dataset. Args: request (google.cloud.aiplatform_v1.types.DeleteDatasetRequest): The request object. Request message for - ``DatasetService.DeleteDataset``. + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. name (str): Required. The resource name of the Dataset to delete. Format: @@ -846,7 +846,7 @@ def delete_dataset( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. 
- response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -865,13 +865,13 @@ def import_data( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Imports data into a Dataset. Args: request (google.cloud.aiplatform_v1.types.ImportDataRequest): The request object. Request message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. name (str): Required. The name of the Dataset resource. Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` @@ -901,7 +901,7 @@ def import_data( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.ImportDataResponse` Response message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. """ # Create or coerce a protobuf request object. @@ -943,7 +943,7 @@ def import_data( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, dataset_service.ImportDataResponse, @@ -962,13 +962,13 @@ def export_data( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Exports data from a Dataset. Args: request (google.cloud.aiplatform_v1.types.ExportDataRequest): The request object. Request message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. name (str): Required. The name of the Dataset resource. 
Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` @@ -997,7 +997,7 @@ def export_data( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.ExportDataResponse` Response message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. """ # Create or coerce a protobuf request object. @@ -1039,7 +1039,7 @@ def export_data( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, dataset_service.ExportDataResponse, @@ -1063,7 +1063,7 @@ def list_data_items( Args: request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): The request object. Request message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. parent (str): Required. The resource name of the Dataset to list DataItems from. Format: @@ -1082,7 +1082,7 @@ def list_data_items( Returns: google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsPager: Response message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. Iterating over this object will yield results and resolve additional pages automatically. @@ -1147,7 +1147,7 @@ def get_annotation_spec( Args: request (google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest): The request object. Request message for - ``DatasetService.GetAnnotationSpec``. + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. name (str): Required. The name of the AnnotationSpec resource. Format: @@ -1223,7 +1223,7 @@ def list_annotations( Args: request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest): The request object. 
Request message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. parent (str): Required. The resource name of the DataItem to list Annotations from. Format: @@ -1243,7 +1243,7 @@ def list_annotations( Returns: google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsPager: Response message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. Iterating over this object will yield results and resolve additional pages automatically. diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py index 2ab4419d03..10653cbf25 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py @@ -74,10 +74,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -85,6 +85,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -94,52 +97,49 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.create_dataset: gapic_v1.method.wrap_method( - self.create_dataset, default_timeout=None, client_info=client_info, + self.create_dataset, default_timeout=5.0, client_info=client_info, ), self.get_dataset: gapic_v1.method.wrap_method( - self.get_dataset, default_timeout=None, client_info=client_info, + self.get_dataset, default_timeout=5.0, client_info=client_info, ), self.update_dataset: gapic_v1.method.wrap_method( - self.update_dataset, default_timeout=None, client_info=client_info, + self.update_dataset, default_timeout=5.0, client_info=client_info, ), self.list_datasets: gapic_v1.method.wrap_method( - self.list_datasets, default_timeout=None, client_info=client_info, + self.list_datasets, default_timeout=5.0, client_info=client_info, ), self.delete_dataset: gapic_v1.method.wrap_method( - self.delete_dataset, default_timeout=None, client_info=client_info, + self.delete_dataset, default_timeout=5.0, client_info=client_info, ), self.import_data: gapic_v1.method.wrap_method( - self.import_data, default_timeout=None, client_info=client_info, + self.import_data, default_timeout=5.0, client_info=client_info, ), self.export_data: gapic_v1.method.wrap_method( - self.export_data, default_timeout=None, client_info=client_info, + 
self.export_data, default_timeout=5.0, client_info=client_info, ), self.list_data_items: gapic_v1.method.wrap_method( - self.list_data_items, default_timeout=None, client_info=client_info, + self.list_data_items, default_timeout=5.0, client_info=client_info, ), self.get_annotation_spec: gapic_v1.method.wrap_method( - self.get_annotation_spec, default_timeout=None, client_info=client_info, + self.get_annotation_spec, default_timeout=5.0, client_info=client_info, ), self.list_annotations: gapic_v1.method.wrap_method( - self.list_annotations, default_timeout=None, client_info=client_info, + self.list_annotations, default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py index 20a01deb79..65bd8baf79 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py @@ -109,7 +109,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -117,70 +120,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -188,18 +171,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -213,7 +186,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -248,7 +221,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py index bcf3331d6b..90d4dc67f2 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py @@ -64,7 +64,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -142,10 +142,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -154,7 +154,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -162,70 +165,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -233,18 +216,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 13f099328b..d66270549f 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.endpoint_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec @@ -195,7 +195,7 @@ async def create_endpoint( Args: request (:class:`google.cloud.aiplatform_v1.types.CreateEndpointRequest`): The request object. Request message for - ``EndpointService.CreateEndpoint``. + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. 
parent (:class:`str`): Required. The resource name of the Location to create the Endpoint in. Format: @@ -248,7 +248,7 @@ async def create_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -286,7 +286,7 @@ async def get_endpoint( Args: request (:class:`google.cloud.aiplatform_v1.types.GetEndpointRequest`): The request object. Request message for - ``EndpointService.GetEndpoint`` + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] name (:class:`str`): Required. The name of the Endpoint resource. Format: ``projects/{project}/locations/{location}/endpoints/{endpoint}`` @@ -330,7 +330,7 @@ async def get_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -360,7 +360,7 @@ async def list_endpoints( Args: request (:class:`google.cloud.aiplatform_v1.types.ListEndpointsRequest`): The request object. Request message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. parent (:class:`str`): Required. The resource name of the Location from which to list the Endpoints. Format: @@ -379,7 +379,7 @@ async def list_endpoints( Returns: google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager: Response message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. Iterating over this object will yield results and resolve additional pages automatically. @@ -407,7 +407,7 @@ async def list_endpoints( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_endpoints, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -444,7 +444,7 @@ async def update_endpoint( Args: request (:class:`google.cloud.aiplatform_v1.types.UpdateEndpointRequest`): The request object. Request message for - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`): Required. The Endpoint which replaces the resource on the server. @@ -497,7 +497,7 @@ async def update_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -529,7 +529,7 @@ async def delete_endpoint( Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteEndpointRequest`): The request object. Request message for - ``EndpointService.DeleteEndpoint``. + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. name (:class:`str`): Required. The name of the Endpoint resource to be deleted. Format: @@ -586,7 +586,7 @@ async def delete_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -629,7 +629,7 @@ async def deploy_model( Args: request (:class:`google.cloud.aiplatform_v1.types.DeployModelRequest`): The request object. Request message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. endpoint (:class:`str`): Required. The name of the Endpoint resource into which to deploy a Model. Format: @@ -641,10 +641,10 @@ async def deploy_model( deployed_model (:class:`google.cloud.aiplatform_v1.types.DeployedModel`): Required. 
The DeployedModel to be created within the Endpoint. Note that - ``Endpoint.traffic_split`` + [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this @@ -655,7 +655,7 @@ async def deploy_model( DeployedModel. If this field is non-empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its @@ -663,7 +663,7 @@ async def deploy_model( add up to 100. If this field is empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] is not updated. This corresponds to the ``traffic_split`` field @@ -683,7 +683,7 @@ async def deploy_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.DeployModelResponse` Response message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. """ # Create or coerce a protobuf request object. @@ -713,7 +713,7 @@ async def deploy_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.deploy_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -757,7 +757,7 @@ async def undeploy_model( Args: request (:class:`google.cloud.aiplatform_v1.types.UndeployModelRequest`): The request object. Request message for - ``EndpointService.UndeployModel``. 
+ [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. endpoint (:class:`str`): Required. The name of the Endpoint resource from which to undeploy a Model. Format: @@ -775,7 +775,7 @@ async def undeploy_model( should not be set. traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]`): If this field is provided, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when @@ -801,7 +801,7 @@ async def undeploy_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse` Response message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. """ # Create or coerce a protobuf request object. @@ -831,7 +831,7 @@ async def undeploy_model( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.undeploy_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py index de54b0b9b5..e4a5878537 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.endpoint_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec @@ -377,13 +377,13 @@ def create_endpoint( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Creates an Endpoint. Args: request (google.cloud.aiplatform_v1.types.CreateEndpointRequest): The request object. Request message for - ``EndpointService.CreateEndpoint``. + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. parent (str): Required. The resource name of the Location to create the Endpoint in. Format: @@ -451,7 +451,7 @@ def create_endpoint( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, gca_endpoint.Endpoint, @@ -475,7 +475,7 @@ def get_endpoint( Args: request (google.cloud.aiplatform_v1.types.GetEndpointRequest): The request object. 
Request message for - ``EndpointService.GetEndpoint`` + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] name (str): Required. The name of the Endpoint resource. Format: ``projects/{project}/locations/{location}/endpoints/{endpoint}`` @@ -550,7 +550,7 @@ def list_endpoints( Args: request (google.cloud.aiplatform_v1.types.ListEndpointsRequest): The request object. Request message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. parent (str): Required. The resource name of the Location from which to list the Endpoints. Format: @@ -569,7 +569,7 @@ def list_endpoints( Returns: google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsPager: Response message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. Iterating over this object will yield results and resolve additional pages automatically. @@ -635,7 +635,7 @@ def update_endpoint( Args: request (google.cloud.aiplatform_v1.types.UpdateEndpointRequest): The request object. Request message for - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. endpoint (google.cloud.aiplatform_v1.types.Endpoint): Required. The Endpoint which replaces the resource on the server. @@ -715,13 +715,13 @@ def delete_endpoint( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes an Endpoint. Args: request (google.cloud.aiplatform_v1.types.DeleteEndpointRequest): The request object. Request message for - ``EndpointService.DeleteEndpoint``. + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. name (str): Required. The name of the Endpoint resource to be deleted. 
Format: @@ -793,7 +793,7 @@ def delete_endpoint( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -815,14 +815,14 @@ def deploy_model( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deploys a Model into this Endpoint, creating a DeployedModel within it. Args: request (google.cloud.aiplatform_v1.types.DeployModelRequest): The request object. Request message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. endpoint (str): Required. The name of the Endpoint resource into which to deploy a Model. Format: @@ -834,10 +834,10 @@ def deploy_model( deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): Required. The DeployedModel to be created within the Endpoint. Note that - ``Endpoint.traffic_split`` + [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this @@ -848,7 +848,7 @@ def deploy_model( DeployedModel. If this field is non-empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its @@ -856,7 +856,7 @@ def deploy_model( add up to 100. 
If this field is empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] is not updated. This corresponds to the ``traffic_split`` field @@ -876,7 +876,7 @@ def deploy_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.DeployModelResponse` Response message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. """ # Create or coerce a protobuf request object. @@ -920,7 +920,7 @@ def deploy_model( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, endpoint_service.DeployModelResponse, @@ -942,7 +942,7 @@ def undeploy_model( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Undeploys a Model from an Endpoint, removing a DeployedModel from it, and freeing all resources it's using. @@ -950,7 +950,7 @@ def undeploy_model( Args: request (google.cloud.aiplatform_v1.types.UndeployModelRequest): The request object. Request message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. endpoint (str): Required. The name of the Endpoint resource from which to undeploy a Model. Format: @@ -968,7 +968,7 @@ def undeploy_model( should not be set. traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]): If this field is provided, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. 
If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when @@ -994,7 +994,7 @@ def undeploy_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse` Response message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. """ # Create or coerce a protobuf request object. @@ -1038,7 +1038,7 @@ def undeploy_model( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, endpoint_service.UndeployModelResponse, diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py index 728c38fec3..054d6c9b01 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py @@ -73,10 +73,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -84,6 +84,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. 
+ self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -93,43 +96,40 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.create_endpoint: gapic_v1.method.wrap_method( - self.create_endpoint, default_timeout=None, client_info=client_info, + self.create_endpoint, default_timeout=5.0, client_info=client_info, ), self.get_endpoint: gapic_v1.method.wrap_method( - self.get_endpoint, default_timeout=None, client_info=client_info, + self.get_endpoint, default_timeout=5.0, client_info=client_info, ), self.list_endpoints: gapic_v1.method.wrap_method( - self.list_endpoints, default_timeout=None, client_info=client_info, + self.list_endpoints, default_timeout=5.0, client_info=client_info, ), self.update_endpoint: gapic_v1.method.wrap_method( - self.update_endpoint, default_timeout=None, client_info=client_info, + self.update_endpoint, default_timeout=5.0, client_info=client_info, ), self.delete_endpoint: gapic_v1.method.wrap_method( - self.delete_endpoint, default_timeout=None, client_info=client_info, + self.delete_endpoint, default_timeout=5.0, client_info=client_info, ), self.deploy_model: gapic_v1.method.wrap_method( - self.deploy_model, default_timeout=None, client_info=client_info, + self.deploy_model, default_timeout=5.0, 
client_info=client_info, ), self.undeploy_model: gapic_v1.method.wrap_method( - self.undeploy_model, default_timeout=None, client_info=client_info, + self.undeploy_model, default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py index d2c13c3fe7..8a2c837161 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py @@ -108,7 +108,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -116,70 +119,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -187,18 +170,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -212,7 +185,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -247,7 +220,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. 
+ """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py index ef97ba490f..d10160a493 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py @@ -63,7 +63,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -141,10 +141,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -153,7 +153,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -161,70 +164,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -232,18 +215,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index e253bcc5d6..91284c5bf6 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.job_service import pagers from google.cloud.aiplatform_v1.types import batch_prediction_job @@ -225,7 +225,7 @@ async def create_custom_job( Args: request (:class:`google.cloud.aiplatform_v1.types.CreateCustomJobRequest`): The request object. Request message for - ``JobService.CreateCustomJob``. + [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. parent (:class:`str`): Required. The resource name of the Location to create the CustomJob in. Format: @@ -282,7 +282,7 @@ async def create_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -312,7 +312,7 @@ async def get_custom_job( Args: request (:class:`google.cloud.aiplatform_v1.types.GetCustomJobRequest`): The request object. Request message for - ``JobService.GetCustomJob``. + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. name (:class:`str`): Required. The name of the CustomJob resource. 
Format: ``projects/{project}/locations/{location}/customJobs/{custom_job}`` @@ -361,7 +361,7 @@ async def get_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -391,7 +391,7 @@ async def list_custom_jobs( Args: request (:class:`google.cloud.aiplatform_v1.types.ListCustomJobsRequest`): The request object. Request message for - ``JobService.ListCustomJobs``. + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. parent (:class:`str`): Required. The resource name of the Location to list the CustomJobs from. Format: @@ -410,7 +410,7 @@ async def list_custom_jobs( Returns: google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager: Response message for - ``JobService.ListCustomJobs`` + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] Iterating over this object will yield results and resolve additional pages automatically. @@ -438,7 +438,7 @@ async def list_custom_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_custom_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -474,7 +474,7 @@ async def delete_custom_job( Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteCustomJobRequest`): The request object. Request message for - ``JobService.DeleteCustomJob``. + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. name (:class:`str`): Required. The name of the CustomJob resource to be deleted. Format: @@ -531,7 +531,7 @@ async def delete_custom_job( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -567,21 +567,21 @@ async def cancel_custom_job( r"""Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetCustomJob`` + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a - ``CustomJob.error`` - value with a ``google.rpc.Status.code`` of + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``CustomJob.state`` is + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to ``CANCELLED``. Args: request (:class:`google.cloud.aiplatform_v1.types.CancelCustomJobRequest`): The request object. Request message for - ``JobService.CancelCustomJob``. + [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. name (:class:`str`): Required. The name of the CustomJob to cancel. Format: ``projects/{project}/locations/{location}/customJobs/{custom_job}`` @@ -618,7 +618,7 @@ async def cancel_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -701,7 +701,7 @@ async def create_data_labeling_job( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -776,7 +776,7 @@ async def get_data_labeling_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -824,7 +824,7 @@ async def list_data_labeling_jobs( Returns: google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager: Response message for - ``JobService.ListDataLabelingJobs``. + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. Iterating over this object will yield results and resolve additional pages automatically. @@ -852,7 +852,7 @@ async def list_data_labeling_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_data_labeling_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -888,7 +888,7 @@ async def delete_data_labeling_job( Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest`): The request object. Request message for - ``JobService.DeleteDataLabelingJob``. + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. name (:class:`str`): Required. The name of the DataLabelingJob to be deleted. Format: @@ -946,7 +946,7 @@ async def delete_data_labeling_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1023,7 +1023,7 @@ async def cancel_data_labeling_job( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1053,7 +1053,7 @@ async def create_hyperparameter_tuning_job( Args: request (:class:`google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest`): The request object. Request message for - ``JobService.CreateHyperparameterTuningJob``. + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. parent (:class:`str`): Required. The resource name of the Location to create the HyperparameterTuningJob in. Format: @@ -1108,7 +1108,7 @@ async def create_hyperparameter_tuning_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1138,7 +1138,7 @@ async def get_hyperparameter_tuning_job( Args: request (:class:`google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest`): The request object. Request message for - ``JobService.GetHyperparameterTuningJob``. + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. name (:class:`str`): Required. The name of the HyperparameterTuningJob resource. Format: @@ -1185,7 +1185,7 @@ async def get_hyperparameter_tuning_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1215,7 +1215,7 @@ async def list_hyperparameter_tuning_jobs( Args: request (:class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest`): The request object. Request message for - ``JobService.ListHyperparameterTuningJobs``. 
+ [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. parent (:class:`str`): Required. The resource name of the Location to list the HyperparameterTuningJobs from. Format: @@ -1234,7 +1234,7 @@ async def list_hyperparameter_tuning_jobs( Returns: google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager: Response message for - ``JobService.ListHyperparameterTuningJobs`` + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] Iterating over this object will yield results and resolve additional pages automatically. @@ -1262,7 +1262,7 @@ async def list_hyperparameter_tuning_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_hyperparameter_tuning_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1298,7 +1298,7 @@ async def delete_hyperparameter_tuning_job( Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest`): The request object. Request message for - ``JobService.DeleteHyperparameterTuningJob``. + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. name (:class:`str`): Required. The name of the HyperparameterTuningJob resource to be deleted. Format: @@ -1356,7 +1356,7 @@ async def delete_hyperparameter_tuning_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1393,21 +1393,21 @@ async def cancel_hyperparameter_tuning_job( cancellation on the HyperparameterTuningJob. The server makes a best effort to cancel the job, but success is not guaranteed. 
Clients can use - ``JobService.GetHyperparameterTuningJob`` + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the HyperparameterTuningJob is not deleted; instead it becomes a job with a - ``HyperparameterTuningJob.error`` - value with a ``google.rpc.Status.code`` of + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``HyperparameterTuningJob.state`` + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] is set to ``CANCELLED``. Args: request (:class:`google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest`): The request object. Request message for - ``JobService.CancelHyperparameterTuningJob``. + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. name (:class:`str`): Required. The name of the HyperparameterTuningJob to cancel. Format: @@ -1446,7 +1446,7 @@ async def cancel_hyperparameter_tuning_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1477,7 +1477,7 @@ async def create_batch_prediction_job( Args: request (:class:`google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest`): The request object. Request message for - ``JobService.CreateBatchPredictionJob``. + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. parent (:class:`str`): Required. The resource name of the Location to create the BatchPredictionJob in. 
Format: @@ -1502,7 +1502,7 @@ async def create_batch_prediction_job( Returns: google.cloud.aiplatform_v1.types.BatchPredictionJob: - A job that uses a ``Model`` to produce predictions + A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions on multiple [input instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. If predictions for significant portion of the @@ -1534,7 +1534,7 @@ async def create_batch_prediction_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1564,7 +1564,7 @@ async def get_batch_prediction_job( Args: request (:class:`google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest`): The request object. Request message for - ``JobService.GetBatchPredictionJob``. + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. name (:class:`str`): Required. The name of the BatchPredictionJob resource. Format: @@ -1583,7 +1583,7 @@ async def get_batch_prediction_job( Returns: google.cloud.aiplatform_v1.types.BatchPredictionJob: - A job that uses a ``Model`` to produce predictions + A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions on multiple [input instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. If predictions for significant portion of the @@ -1613,7 +1613,7 @@ async def get_batch_prediction_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1643,7 +1643,7 @@ async def list_batch_prediction_jobs( Args: request (:class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest`): The request object. 
Request message for - ``JobService.ListBatchPredictionJobs``. + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. parent (:class:`str`): Required. The resource name of the Location to list the BatchPredictionJobs from. Format: @@ -1662,7 +1662,7 @@ async def list_batch_prediction_jobs( Returns: google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager: Response message for - ``JobService.ListBatchPredictionJobs`` + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] Iterating over this object will yield results and resolve additional pages automatically. @@ -1690,7 +1690,7 @@ async def list_batch_prediction_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_batch_prediction_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1727,7 +1727,7 @@ async def delete_batch_prediction_job( Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest`): The request object. Request message for - ``JobService.DeleteBatchPredictionJob``. + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. name (:class:`str`): Required. The name of the BatchPredictionJob resource to be deleted. Format: @@ -1785,7 +1785,7 @@ async def delete_batch_prediction_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1823,18 +1823,18 @@ async def cancel_batch_prediction_job( Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. 
Clients can use - ``JobService.GetBatchPredictionJob`` + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its - ``BatchPredictionJob.state`` + [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] is set to ``CANCELLED``. Any files already outputted by the job are not deleted. Args: request (:class:`google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest`): The request object. Request message for - ``JobService.CancelBatchPredictionJob``. + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. name (:class:`str`): Required. The name of the BatchPredictionJob to cancel. Format: @@ -1873,7 +1873,7 @@ async def cancel_batch_prediction_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index 746ce91c4b..efdee645c8 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.job_service import pagers from google.cloud.aiplatform_v1.types import batch_prediction_job @@ -489,7 +489,7 @@ def create_custom_job( Args: 
request (google.cloud.aiplatform_v1.types.CreateCustomJobRequest): The request object. Request message for - ``JobService.CreateCustomJob``. + [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. parent (str): Required. The resource name of the Location to create the CustomJob in. Format: @@ -577,7 +577,7 @@ def get_custom_job( Args: request (google.cloud.aiplatform_v1.types.GetCustomJobRequest): The request object. Request message for - ``JobService.GetCustomJob``. + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. name (str): Required. The name of the CustomJob resource. Format: ``projects/{project}/locations/{location}/customJobs/{custom_job}`` @@ -657,7 +657,7 @@ def list_custom_jobs( Args: request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest): The request object. Request message for - ``JobService.ListCustomJobs``. + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. parent (str): Required. The resource name of the Location to list the CustomJobs from. Format: @@ -676,7 +676,7 @@ def list_custom_jobs( Returns: google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager: Response message for - ``JobService.ListCustomJobs`` + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] Iterating over this object will yield results and resolve additional pages automatically. @@ -735,13 +735,13 @@ def delete_custom_job( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a CustomJob. Args: request (google.cloud.aiplatform_v1.types.DeleteCustomJobRequest): The request object. Request message for - ``JobService.DeleteCustomJob``. + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. name (str): Required. The name of the CustomJob resource to be deleted. 
Format: @@ -813,7 +813,7 @@ def delete_custom_job( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -835,21 +835,21 @@ def cancel_custom_job( r"""Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetCustomJob`` + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a - ``CustomJob.error`` - value with a ``google.rpc.Status.code`` of + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``CustomJob.state`` is + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to ``CANCELLED``. Args: request (google.cloud.aiplatform_v1.types.CancelCustomJobRequest): The request object. Request message for - ``JobService.CancelCustomJob``. + [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. name (str): Required. The name of the CustomJob to cancel. Format: ``projects/{project}/locations/{location}/customJobs/{custom_job}`` @@ -1095,7 +1095,7 @@ def list_data_labeling_jobs( Returns: google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager: Response message for - ``JobService.ListDataLabelingJobs``. + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. Iterating over this object will yield results and resolve additional pages automatically. 
@@ -1154,13 +1154,13 @@ def delete_data_labeling_job( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a DataLabelingJob. Args: request (google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest): The request object. Request message for - ``JobService.DeleteDataLabelingJob``. + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. name (str): Required. The name of the DataLabelingJob to be deleted. Format: @@ -1233,7 +1233,7 @@ def delete_data_labeling_job( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -1327,7 +1327,7 @@ def create_hyperparameter_tuning_job( Args: request (google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest): The request object. Request message for - ``JobService.CreateHyperparameterTuningJob``. + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. parent (str): Required. The resource name of the Location to create the HyperparameterTuningJob in. Format: @@ -1415,7 +1415,7 @@ def get_hyperparameter_tuning_job( Args: request (google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest): The request object. Request message for - ``JobService.GetHyperparameterTuningJob``. + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. name (str): Required. The name of the HyperparameterTuningJob resource. Format: @@ -1495,7 +1495,7 @@ def list_hyperparameter_tuning_jobs( Args: request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): The request object. 
Request message for - ``JobService.ListHyperparameterTuningJobs``. + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. parent (str): Required. The resource name of the Location to list the HyperparameterTuningJobs from. Format: @@ -1514,7 +1514,7 @@ def list_hyperparameter_tuning_jobs( Returns: google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager: Response message for - ``JobService.ListHyperparameterTuningJobs`` + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] Iterating over this object will yield results and resolve additional pages automatically. @@ -1575,13 +1575,13 @@ def delete_hyperparameter_tuning_job( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a HyperparameterTuningJob. Args: request (google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest): The request object. Request message for - ``JobService.DeleteHyperparameterTuningJob``. + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. name (str): Required. The name of the HyperparameterTuningJob resource to be deleted. Format: @@ -1656,7 +1656,7 @@ def delete_hyperparameter_tuning_job( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -1679,21 +1679,21 @@ def cancel_hyperparameter_tuning_job( cancellation on the HyperparameterTuningJob. The server makes a best effort to cancel the job, but success is not guaranteed. 
Clients can use - ``JobService.GetHyperparameterTuningJob`` + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the HyperparameterTuningJob is not deleted; instead it becomes a job with a - ``HyperparameterTuningJob.error`` - value with a ``google.rpc.Status.code`` of + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``HyperparameterTuningJob.state`` + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] is set to ``CANCELLED``. Args: request (google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest): The request object. Request message for - ``JobService.CancelHyperparameterTuningJob``. + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. name (str): Required. The name of the HyperparameterTuningJob to cancel. Format: @@ -1766,7 +1766,7 @@ def create_batch_prediction_job( Args: request (google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest): The request object. Request message for - ``JobService.CreateBatchPredictionJob``. + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. parent (str): Required. The resource name of the Location to create the BatchPredictionJob in. Format: @@ -1791,7 +1791,7 @@ def create_batch_prediction_job( Returns: google.cloud.aiplatform_v1.types.BatchPredictionJob: - A job that uses a ``Model`` to produce predictions + A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions on multiple [input instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. 
If predictions for significant portion of the @@ -1856,7 +1856,7 @@ def get_batch_prediction_job( Args: request (google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest): The request object. Request message for - ``JobService.GetBatchPredictionJob``. + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. name (str): Required. The name of the BatchPredictionJob resource. Format: @@ -1875,7 +1875,7 @@ def get_batch_prediction_job( Returns: google.cloud.aiplatform_v1.types.BatchPredictionJob: - A job that uses a ``Model`` to produce predictions + A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions on multiple [input instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. If predictions for significant portion of the @@ -1936,7 +1936,7 @@ def list_batch_prediction_jobs( Args: request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): The request object. Request message for - ``JobService.ListBatchPredictionJobs``. + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. parent (str): Required. The resource name of the Location to list the BatchPredictionJobs from. Format: @@ -1955,7 +1955,7 @@ def list_batch_prediction_jobs( Returns: google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager: Response message for - ``JobService.ListBatchPredictionJobs`` + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] Iterating over this object will yield results and resolve additional pages automatically. @@ -2016,14 +2016,14 @@ def delete_batch_prediction_job( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a BatchPredictionJob. Can only be called on jobs that already finished. 
Args: request (google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest): The request object. Request message for - ``JobService.DeleteBatchPredictionJob``. + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. name (str): Required. The name of the BatchPredictionJob resource to be deleted. Format: @@ -2098,7 +2098,7 @@ def delete_batch_prediction_job( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -2122,18 +2122,18 @@ def cancel_batch_prediction_job( Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetBatchPredictionJob`` + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its - ``BatchPredictionJob.state`` + [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] is set to ``CANCELLED``. Any files already outputted by the job are not deleted. Args: request (google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest): The request object. Request message for - ``JobService.CancelBatchPredictionJob``. + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. name (str): Required. The name of the BatchPredictionJob to cancel. 
Format: diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1/services/job_service/transports/base.py index 42ab8e1688..5cddf58749 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/base.py @@ -84,10 +84,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -95,6 +95,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -104,111 +107,108 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. 
- self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.create_custom_job: gapic_v1.method.wrap_method( - self.create_custom_job, default_timeout=None, client_info=client_info, + self.create_custom_job, default_timeout=5.0, client_info=client_info, ), self.get_custom_job: gapic_v1.method.wrap_method( - self.get_custom_job, default_timeout=None, client_info=client_info, + self.get_custom_job, default_timeout=5.0, client_info=client_info, ), self.list_custom_jobs: gapic_v1.method.wrap_method( - self.list_custom_jobs, default_timeout=None, client_info=client_info, + self.list_custom_jobs, default_timeout=5.0, client_info=client_info, ), self.delete_custom_job: gapic_v1.method.wrap_method( - self.delete_custom_job, default_timeout=None, client_info=client_info, + self.delete_custom_job, default_timeout=5.0, client_info=client_info, ), self.cancel_custom_job: gapic_v1.method.wrap_method( - self.cancel_custom_job, default_timeout=None, client_info=client_info, + self.cancel_custom_job, default_timeout=5.0, client_info=client_info, ), self.create_data_labeling_job: gapic_v1.method.wrap_method( self.create_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_data_labeling_job: gapic_v1.method.wrap_method( self.get_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_data_labeling_jobs: gapic_v1.method.wrap_method( self.list_data_labeling_jobs, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_data_labeling_job: gapic_v1.method.wrap_method( self.delete_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_data_labeling_job: gapic_v1.method.wrap_method( self.cancel_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), 
self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.create_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.get_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method( self.list_hyperparameter_tuning_jobs, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.delete_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.cancel_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.create_batch_prediction_job: gapic_v1.method.wrap_method( self.create_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_batch_prediction_job: gapic_v1.method.wrap_method( self.get_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_batch_prediction_jobs: gapic_v1.method.wrap_method( self.list_batch_prediction_jobs, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_batch_prediction_job: gapic_v1.method.wrap_method( self.delete_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_batch_prediction_job: gapic_v1.method.wrap_method( self.cancel_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py index a9c90ecdaa..ac94aff183 100644 --- 
a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py @@ -121,7 +121,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -129,70 +132,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -200,18 +183,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -225,7 +198,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -260,7 +233,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property @@ -393,15 +367,15 @@ def cancel_custom_job( Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetCustomJob`` + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. 
On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a - ``CustomJob.error`` - value with a ``google.rpc.Status.code`` of + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``CustomJob.state`` is + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to ``CANCELLED``. Returns: @@ -698,15 +672,15 @@ def cancel_hyperparameter_tuning_job( cancellation on the HyperparameterTuningJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetHyperparameterTuningJob`` + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the HyperparameterTuningJob is not deleted; instead it becomes a job with a - ``HyperparameterTuningJob.error`` - value with a ``google.rpc.Status.code`` of + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``HyperparameterTuningJob.state`` + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] is set to ``CANCELLED``. Returns: @@ -855,11 +829,11 @@ def cancel_batch_prediction_job( Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetBatchPredictionJob`` + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. 
On a successful cancellation, the BatchPredictionJob is not deleted;instead its - ``BatchPredictionJob.state`` + [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] is set to ``CANCELLED``. Any files already outputted by the job are not deleted. diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py index f056094c9d..0b4943e563 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py @@ -76,7 +76,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -154,10 +154,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -166,7 +166,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -174,70 +177,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -245,18 +228,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: @@ -405,15 +378,15 @@ def cancel_custom_job( Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetCustomJob`` + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a - ``CustomJob.error`` - value with a ``google.rpc.Status.code`` of + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``CustomJob.state`` is + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to ``CANCELLED``. Returns: @@ -716,15 +689,15 @@ def cancel_hyperparameter_tuning_job( cancellation on the HyperparameterTuningJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetHyperparameterTuningJob`` + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. 
On successful cancellation, the HyperparameterTuningJob is not deleted; instead it becomes a job with a - ``HyperparameterTuningJob.error`` - value with a ``google.rpc.Status.code`` of + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``HyperparameterTuningJob.state`` + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] is set to ``CANCELLED``. Returns: @@ -877,11 +850,11 @@ def cancel_batch_prediction_job( Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetBatchPredictionJob`` + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its - ``BatchPredictionJob.state`` + [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] is set to ``CANCELLED``. Any files already outputted by the job are not deleted. diff --git a/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1/services/migration_service/async_client.py index e7f45eeaf5..6ddb72a2d2 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/async_client.py @@ -206,7 +206,7 @@ async def search_migratable_resources( Args: request (:class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest`): The request object. Request message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. 
parent (:class:`str`): Required. The location that the migratable resources should be searched from. It's the AI Platform location @@ -227,7 +227,7 @@ async def search_migratable_resources( Returns: google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager: Response message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. Iterating over this object will yield results and resolve additional pages automatically. @@ -296,7 +296,7 @@ async def batch_migrate_resources( Args: request (:class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest`): The request object. Request message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. parent (:class:`str`): Required. The location of the migrated resource will live in. Format: @@ -329,7 +329,7 @@ async def batch_migrate_resources( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse` Response message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. """ # Create or coerce a protobuf request object. 
diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index 0a23f262c2..75fa0ce0a7 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -196,32 +196,32 @@ def parse_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Return a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parse a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", - path, - ) + m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) -> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Return a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parse a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod @@ -461,7 +461,7 @@ def search_migratable_resources( Args: request 
(google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): The request object. Request message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. parent (str): Required. The location that the migratable resources should be searched from. It's the AI Platform location @@ -482,7 +482,7 @@ def search_migratable_resources( Returns: google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager: Response message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. Iterating over this object will yield results and resolve additional pages automatically. @@ -554,7 +554,7 @@ def batch_migrate_resources( Args: request (google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest): The request object. Request message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. parent (str): Required. The location of the migrated resource will live in. Format: @@ -587,7 +587,7 @@ def batch_migrate_resources( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse` Response message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. """ # Create or coerce a protobuf request object. 
diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py index da4cabae63..f10e4627c6 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py @@ -71,10 +71,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -82,6 +82,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -91,20 +94,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. 
- self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py index f11d72386d..b8cdb273a1 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py @@ -110,7 +110,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -118,70 +121,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -189,18 +172,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -214,7 +187,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -249,7 +222,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. 
+ """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py index dbdddf31e5..190f45eac1 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py @@ -65,7 +65,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -143,10 +143,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -155,7 +155,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -163,70 +166,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -234,18 +217,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py index 687c22455a..e1c69562f0 100644 --- a/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.model_service import pagers from google.cloud.aiplatform_v1.types import deployed_model_ref @@ -209,7 +209,7 @@ async def upload_model( Args: request (:class:`google.cloud.aiplatform_v1.types.UploadModelRequest`): The request object. Request message for - ``ModelService.UploadModel``. + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. parent (:class:`str`): Required. The resource name of the Location into which to upload the Model. Format: @@ -237,7 +237,7 @@ async def upload_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.UploadModelResponse` Response message of - ``ModelService.UploadModel`` + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] operation. """ @@ -265,7 +265,7 @@ async def upload_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.upload_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -303,7 +303,7 @@ async def get_model( Args: request (:class:`google.cloud.aiplatform_v1.types.GetModelRequest`): The request object. 
Request message for - ``ModelService.GetModel``. + [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. name (:class:`str`): Required. The name of the Model resource. Format: ``projects/{project}/locations/{location}/models/{model}`` @@ -344,7 +344,7 @@ async def get_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -374,7 +374,7 @@ async def list_models( Args: request (:class:`google.cloud.aiplatform_v1.types.ListModelsRequest`): The request object. Request message for - ``ModelService.ListModels``. + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. parent (:class:`str`): Required. The resource name of the Location to list the Models from. Format: @@ -393,7 +393,7 @@ async def list_models( Returns: google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsAsyncPager: Response message for - ``ModelService.ListModels`` + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] Iterating over this object will yield results and resolve additional pages automatically. @@ -421,7 +421,7 @@ async def list_models( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_models, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -458,7 +458,7 @@ async def update_model( Args: request (:class:`google.cloud.aiplatform_v1.types.UpdateModelRequest`): The request object. Request message for - ``ModelService.UpdateModel``. + [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. model (:class:`google.cloud.aiplatform_v1.types.Model`): Required. The Model which replaces the resource on the server. @@ -509,7 +509,7 @@ async def update_model( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -543,7 +543,7 @@ async def delete_model( Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteModelRequest`): The request object. Request message for - ``ModelService.DeleteModel``. + [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. name (:class:`str`): Required. The name of the Model resource to be deleted. Format: @@ -600,7 +600,7 @@ async def delete_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -642,7 +642,7 @@ async def export_model( Args: request (:class:`google.cloud.aiplatform_v1.types.ExportModelRequest`): The request object. Request message for - ``ModelService.ExportModel``. + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. name (:class:`str`): Required. The resource name of the Model to export. Format: @@ -672,7 +672,7 @@ async def export_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.ExportModelResponse` Response message of - ``ModelService.ExportModel`` + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] operation. """ @@ -700,7 +700,7 @@ async def export_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.export_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -738,7 +738,7 @@ async def get_model_evaluation( Args: request (:class:`google.cloud.aiplatform_v1.types.GetModelEvaluationRequest`): The request object. Request message for - ``ModelService.GetModelEvaluation``. + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. name (:class:`str`): Required. 
The name of the ModelEvaluation resource. Format: @@ -785,7 +785,7 @@ async def get_model_evaluation( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_model_evaluation, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -815,7 +815,7 @@ async def list_model_evaluations( Args: request (:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest`): The request object. Request message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. parent (:class:`str`): Required. The resource name of the Model to list the ModelEvaluations from. Format: @@ -834,7 +834,7 @@ async def list_model_evaluations( Returns: google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsAsyncPager: Response message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. Iterating over this object will yield results and resolve additional pages automatically. @@ -862,7 +862,7 @@ async def list_model_evaluations( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_model_evaluations, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -898,7 +898,7 @@ async def get_model_evaluation_slice( Args: request (:class:`google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest`): The request object. Request message for - ``ModelService.GetModelEvaluationSlice``. + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. name (:class:`str`): Required. The name of the ModelEvaluationSlice resource. Format: @@ -945,7 +945,7 @@ async def get_model_evaluation_slice( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_model_evaluation_slice, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -975,7 +975,7 @@ async def list_model_evaluation_slices( Args: request (:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest`): The request object. Request message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. parent (:class:`str`): Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices from. Format: @@ -995,7 +995,7 @@ async def list_model_evaluation_slices( Returns: google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager: Response message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. Iterating over this object will yield results and resolve additional pages automatically. @@ -1023,7 +1023,7 @@ async def list_model_evaluation_slices( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_model_evaluation_slices, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py index fa75f3c22b..9f2de43306 100644 --- a/google/cloud/aiplatform_v1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.model_service import pagers from google.cloud.aiplatform_v1.types import deployed_model_ref @@ -437,13 +437,13 @@ def upload_model( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Uploads a Model artifact into AI Platform. Args: request (google.cloud.aiplatform_v1.types.UploadModelRequest): The request object. Request message for - ``ModelService.UploadModel``. + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. parent (str): Required. The resource name of the Location into which to upload the Model. Format: @@ -471,7 +471,7 @@ def upload_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.UploadModelResponse` Response message of - ``ModelService.UploadModel`` + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] operation. """ @@ -514,7 +514,7 @@ def upload_model( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. 
- response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, model_service.UploadModelResponse, @@ -538,7 +538,7 @@ def get_model( Args: request (google.cloud.aiplatform_v1.types.GetModelRequest): The request object. Request message for - ``ModelService.GetModel``. + [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. name (str): Required. The name of the Model resource. Format: ``projects/{project}/locations/{location}/models/{model}`` @@ -610,7 +610,7 @@ def list_models( Args: request (google.cloud.aiplatform_v1.types.ListModelsRequest): The request object. Request message for - ``ModelService.ListModels``. + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. parent (str): Required. The resource name of the Location to list the Models from. Format: @@ -629,7 +629,7 @@ def list_models( Returns: google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsPager: Response message for - ``ModelService.ListModels`` + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] Iterating over this object will yield results and resolve additional pages automatically. @@ -695,7 +695,7 @@ def update_model( Args: request (google.cloud.aiplatform_v1.types.UpdateModelRequest): The request object. Request message for - ``ModelService.UpdateModel``. + [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. model (google.cloud.aiplatform_v1.types.Model): Required. The Model which replaces the resource on the server. @@ -773,7 +773,7 @@ def delete_model( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a Model. Note: Model can only be deleted if there are no DeployedModels created from it. 
@@ -781,7 +781,7 @@ def delete_model( Args: request (google.cloud.aiplatform_v1.types.DeleteModelRequest): The request object. Request message for - ``ModelService.DeleteModel``. + [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. name (str): Required. The name of the Model resource to be deleted. Format: @@ -853,7 +853,7 @@ def delete_model( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -872,7 +872,7 @@ def export_model( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Exports a trained, exportable, Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export @@ -881,7 +881,7 @@ def export_model( Args: request (google.cloud.aiplatform_v1.types.ExportModelRequest): The request object. Request message for - ``ModelService.ExportModel``. + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. name (str): Required. The resource name of the Model to export. Format: @@ -911,7 +911,7 @@ def export_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.ExportModelResponse` Response message of - ``ModelService.ExportModel`` + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] operation. """ @@ -954,7 +954,7 @@ def export_model( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. 
- response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, model_service.ExportModelResponse, @@ -978,7 +978,7 @@ def get_model_evaluation( Args: request (google.cloud.aiplatform_v1.types.GetModelEvaluationRequest): The request object. Request message for - ``ModelService.GetModelEvaluation``. + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. name (str): Required. The name of the ModelEvaluation resource. Format: @@ -1056,7 +1056,7 @@ def list_model_evaluations( Args: request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): The request object. Request message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. parent (str): Required. The resource name of the Model to list the ModelEvaluations from. Format: @@ -1075,7 +1075,7 @@ def list_model_evaluations( Returns: google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsPager: Response message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. Iterating over this object will yield results and resolve additional pages automatically. @@ -1140,7 +1140,7 @@ def get_model_evaluation_slice( Args: request (google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest): The request object. Request message for - ``ModelService.GetModelEvaluationSlice``. + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. name (str): Required. The name of the ModelEvaluationSlice resource. Format: @@ -1220,7 +1220,7 @@ def list_model_evaluation_slices( Args: request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): The request object. Request message for - ``ModelService.ListModelEvaluationSlices``. 
+ [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. parent (str): Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices from. Format: @@ -1240,7 +1240,7 @@ def list_model_evaluation_slices( Returns: google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesPager: Response message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. Iterating over this object will yield results and resolve additional pages automatically. diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_service/transports/base.py index d937f09a61..5252ac9c36 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/base.py @@ -75,10 +75,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -86,6 +86,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -95,59 +98,54 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.upload_model: gapic_v1.method.wrap_method( - self.upload_model, default_timeout=None, client_info=client_info, + self.upload_model, default_timeout=5.0, client_info=client_info, ), self.get_model: gapic_v1.method.wrap_method( - self.get_model, default_timeout=None, client_info=client_info, + self.get_model, default_timeout=5.0, client_info=client_info, ), self.list_models: gapic_v1.method.wrap_method( - self.list_models, default_timeout=None, client_info=client_info, + self.list_models, default_timeout=5.0, client_info=client_info, ), self.update_model: gapic_v1.method.wrap_method( - self.update_model, default_timeout=None, client_info=client_info, + self.update_model, default_timeout=5.0, client_info=client_info, ), self.delete_model: gapic_v1.method.wrap_method( - self.delete_model, default_timeout=None, client_info=client_info, + self.delete_model, default_timeout=5.0, client_info=client_info, ), self.export_model: gapic_v1.method.wrap_method( - self.export_model, default_timeout=None, client_info=client_info, + self.export_model, default_timeout=5.0, client_info=client_info, ), self.get_model_evaluation: gapic_v1.method.wrap_method( - self.get_model_evaluation, - default_timeout=None, - client_info=client_info, + 
self.get_model_evaluation, default_timeout=5.0, client_info=client_info, ), self.list_model_evaluations: gapic_v1.method.wrap_method( self.list_model_evaluations, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_model_evaluation_slice: gapic_v1.method.wrap_method( self.get_model_evaluation_slice, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_model_evaluation_slices: gapic_v1.method.wrap_method( self.list_model_evaluation_slices, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py index b6f2efb427..92015d0848 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py @@ -112,7 +112,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -120,70 +123,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -191,18 +174,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -216,7 +189,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -251,7 +224,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py index 2aeffea93f..2de86d2623 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py @@ -67,7 +67,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -145,10 +145,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -157,7 +157,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -165,70 +168,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -236,18 +219,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index fc7337a7a3..70315eb5de 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.pipeline_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec @@ -203,7 +203,7 @@ async def create_training_pipeline( Args: request (:class:`google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest`): The request object. Request message for - ``PipelineService.CreateTrainingPipeline``. 
+ [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. parent (:class:`str`): Required. The resource name of the Location to create the TrainingPipeline in. Format: @@ -232,7 +232,7 @@ async def create_training_pipeline( always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - ``upload`` + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] the Model to AI Platform, and evaluate the Model. """ @@ -260,7 +260,7 @@ async def create_training_pipeline( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -290,7 +290,7 @@ async def get_training_pipeline( Args: request (:class:`google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest`): The request object. Request message for - ``PipelineService.GetTrainingPipeline``. + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. name (:class:`str`): Required. The name of the TrainingPipeline resource. Format: @@ -313,7 +313,7 @@ async def get_training_pipeline( always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - ``upload`` + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] the Model to AI Platform, and evaluate the Model. """ @@ -339,7 +339,7 @@ async def get_training_pipeline( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -369,7 +369,7 @@ async def list_training_pipelines( Args: request (:class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest`): The request object. 
Request message for - ``PipelineService.ListTrainingPipelines``. + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. parent (:class:`str`): Required. The resource name of the Location to list the TrainingPipelines from. Format: @@ -388,7 +388,7 @@ async def list_training_pipelines( Returns: google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: Response message for - ``PipelineService.ListTrainingPipelines`` + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] Iterating over this object will yield results and resolve additional pages automatically. @@ -416,7 +416,7 @@ async def list_training_pipelines( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_training_pipelines, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -452,7 +452,7 @@ async def delete_training_pipeline( Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest`): The request object. Request message for - ``PipelineService.DeleteTrainingPipeline``. + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. name (:class:`str`): Required. The name of the TrainingPipeline resource to be deleted. Format: @@ -510,7 +510,7 @@ async def delete_training_pipeline( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -546,21 +546,21 @@ async def cancel_training_pipeline( r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. 
Clients can use - ``PipelineService.GetTrainingPipeline`` + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a - ``TrainingPipeline.error`` - value with a ``google.rpc.Status.code`` of + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``TrainingPipeline.state`` + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to ``CANCELLED``. Args: request (:class:`google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest`): The request object. Request message for - ``PipelineService.CancelTrainingPipeline``. + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. name (:class:`str`): Required. The name of the TrainingPipeline to cancel. Format: @@ -599,7 +599,7 @@ async def cancel_training_pipeline( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py index 39f37eb72e..388997af9d 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.pipeline_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec @@ -405,7 +405,7 @@ def create_training_pipeline( Args: request (google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest): The request object. Request message for - ``PipelineService.CreateTrainingPipeline``. + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. parent (str): Required. The resource name of the Location to create the TrainingPipeline in. Format: @@ -434,7 +434,7 @@ def create_training_pipeline( always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - ``upload`` + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] the Model to AI Platform, and evaluate the Model. """ @@ -493,7 +493,7 @@ def get_training_pipeline( Args: request (google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest): The request object. Request message for - ``PipelineService.GetTrainingPipeline``. 
+ [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. name (str): Required. The name of the TrainingPipeline resource. Format: @@ -516,7 +516,7 @@ def get_training_pipeline( always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - ``upload`` + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] the Model to AI Platform, and evaluate the Model. """ @@ -573,7 +573,7 @@ def list_training_pipelines( Args: request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): The request object. Request message for - ``PipelineService.ListTrainingPipelines``. + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. parent (str): Required. The resource name of the Location to list the TrainingPipelines from. Format: @@ -592,7 +592,7 @@ def list_training_pipelines( Returns: google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesPager: Response message for - ``PipelineService.ListTrainingPipelines`` + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] Iterating over this object will yield results and resolve additional pages automatically. @@ -651,13 +651,13 @@ def delete_training_pipeline( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a TrainingPipeline. Args: request (google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest): The request object. Request message for - ``PipelineService.DeleteTrainingPipeline``. + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. name (str): Required. The name of the TrainingPipeline resource to be deleted. 
Format: @@ -730,7 +730,7 @@ def delete_training_pipeline( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -752,21 +752,21 @@ def cancel_training_pipeline( r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use - ``PipelineService.GetTrainingPipeline`` + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a - ``TrainingPipeline.error`` - value with a ``google.rpc.Status.code`` of + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``TrainingPipeline.state`` + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to ``CANCELLED``. Args: request (google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest): The request object. Request message for - ``PipelineService.CancelTrainingPipeline``. + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. name (str): Required. The name of the TrainingPipeline to cancel. 
Format: diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py index e4bc8e66a8..9d8f56b2ab 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py @@ -74,10 +74,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -85,6 +85,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -94,46 +97,43 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. 
- self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.create_training_pipeline: gapic_v1.method.wrap_method( self.create_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_training_pipeline: gapic_v1.method.wrap_method( self.get_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_training_pipelines: gapic_v1.method.wrap_method( self.list_training_pipelines, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_training_pipeline: gapic_v1.method.wrap_method( self.delete_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_training_pipeline: gapic_v1.method.wrap_method( self.cancel_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py index b7d20db080..2e5af04a2c 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -111,7 +111,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -119,70 +122,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. 
credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -190,18 +173,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -215,7 +188,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -250,7 +223,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property @@ -392,15 +366,15 @@ def cancel_training_pipeline( Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use - ``PipelineService.GetTrainingPipeline`` + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a - ``TrainingPipeline.error`` - value with a ``google.rpc.Status.code`` of + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``TrainingPipeline.state`` + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to ``CANCELLED``. 
Returns: diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py index ceed94071f..747611c44b 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -66,7 +66,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -144,10 +144,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -156,7 +156,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -164,70 +167,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -235,18 +218,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: @@ -402,15 +375,15 @@ def cancel_training_pipeline( Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use - ``PipelineService.GetTrainingPipeline`` + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a - ``TrainingPipeline.error`` - value with a ``google.rpc.Status.code`` of + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``TrainingPipeline.state`` + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to ``CANCELLED``. Returns: diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index cc6d011e88..5d3654a498 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -185,7 +185,7 @@ async def predict( Args: request (:class:`google.cloud.aiplatform_v1.types.PredictRequest`): The request object. Request message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. endpoint (:class:`str`): Required. The name of the Endpoint requested to serve the prediction. 
Format: @@ -205,7 +205,7 @@ async def predict( Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this @@ -216,7 +216,7 @@ async def predict( DeployedModels' [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this @@ -231,7 +231,7 @@ async def predict( Returns: google.cloud.aiplatform_v1.types.PredictResponse: Response message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. """ # Create or coerce a protobuf request object. @@ -261,7 +261,7 @@ async def predict( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.predict, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py index 029fb851b8..340c9dc16f 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -361,7 +361,7 @@ def predict( Args: request (google.cloud.aiplatform_v1.types.PredictRequest): The request object. Request message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. endpoint (str): Required. 
The name of the Endpoint requested to serve the prediction. Format: @@ -381,7 +381,7 @@ def predict( Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this @@ -392,7 +392,7 @@ def predict( DeployedModels' [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this @@ -407,7 +407,7 @@ def predict( Returns: google.cloud.aiplatform_v1.types.PredictResponse: Response message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py index 311639daaf..bee77f7896 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py @@ -69,10 +69,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -80,6 +80,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -89,25 +92,22 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { self.predict: gapic_v1.method.wrap_method( - self.predict, default_timeout=None, client_info=client_info, + self.predict, default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py index 86aef5e81a..f78e11bd2d 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py @@ -106,7 +106,9 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -114,70 +116,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -185,17 +167,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -209,7 +182,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -244,7 +217,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py index 620f340813..c9d5e2ba94 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py @@ -61,7 +61,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. 
Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -139,10 +139,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -151,7 +151,9 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -159,70 +161,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -230,17 +212,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py index 57e2b8a0a7..3cbd1325f2 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -204,7 +204,7 @@ async def create_specialist_pool( Args: request (:class:`google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest`): The request object. Request message for - ``SpecialistPoolService.CreateSpecialistPool``. + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. parent (:class:`str`): Required. The parent Project name for the new SpecialistPool. The form is @@ -265,7 +265,7 @@ async def create_specialist_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -303,7 +303,7 @@ async def get_specialist_pool( Args: request (:class:`google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest`): The request object. Request message for - ``SpecialistPoolService.GetSpecialistPool``. 
+ [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. name (:class:`str`): Required. The name of the SpecialistPool resource. The form is @@ -357,7 +357,7 @@ async def get_specialist_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -387,7 +387,7 @@ async def list_specialist_pools( Args: request (:class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest`): The request object. Request message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. parent (:class:`str`): Required. The name of the SpecialistPool's parent resource. Format: @@ -406,7 +406,7 @@ async def list_specialist_pools( Returns: google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager: Response message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. Iterating over this object will yield results and resolve additional pages automatically. @@ -434,7 +434,7 @@ async def list_specialist_pools( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_specialist_pools, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -471,7 +471,7 @@ async def delete_specialist_pool( Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest`): The request object. Request message for - ``SpecialistPoolService.DeleteSpecialistPool``. + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. name (:class:`str`): Required. 
The resource name of the SpecialistPool to delete. Format: @@ -528,7 +528,7 @@ async def delete_specialist_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -567,7 +567,7 @@ async def update_specialist_pool( Args: request (:class:`google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest`): The request object. Request message for - ``SpecialistPoolService.UpdateSpecialistPool``. + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`): Required. The SpecialistPool which replaces the resource on the server. @@ -627,7 +627,7 @@ async def update_specialist_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py index c6429b54f8..12d11c3b42 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -367,13 +367,13 @@ def create_specialist_pool( retry: retries.Retry = 
gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Creates a SpecialistPool. Args: request (google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest): The request object. Request message for - ``SpecialistPoolService.CreateSpecialistPool``. + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. parent (str): Required. The parent Project name for the new SpecialistPool. The form is @@ -449,7 +449,7 @@ def create_specialist_pool( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, gca_specialist_pool.SpecialistPool, @@ -473,7 +473,7 @@ def get_specialist_pool( Args: request (google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest): The request object. Request message for - ``SpecialistPoolService.GetSpecialistPool``. + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. name (str): Required. The name of the SpecialistPool resource. The form is @@ -558,7 +558,7 @@ def list_specialist_pools( Args: request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): The request object. Request message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. parent (str): Required. The name of the SpecialistPool's parent resource. Format: @@ -577,7 +577,7 @@ def list_specialist_pools( Returns: google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager: Response message for - ``SpecialistPoolService.ListSpecialistPools``. 
+ [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. Iterating over this object will yield results and resolve additional pages automatically. @@ -636,14 +636,14 @@ def delete_specialist_pool( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a SpecialistPool as well as all Specialists in the pool. Args: request (google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest): The request object. Request message for - ``SpecialistPoolService.DeleteSpecialistPool``. + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. name (str): Required. The resource name of the SpecialistPool to delete. Format: @@ -715,7 +715,7 @@ def delete_specialist_pool( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -734,13 +734,13 @@ def update_specialist_pool( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Updates a SpecialistPool. Args: request (google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest): The request object. Request message for - ``SpecialistPoolService.UpdateSpecialistPool``. + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): Required. The SpecialistPool which replaces the resource on the server. 
@@ -817,7 +817,7 @@ def update_specialist_pool( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, gca_specialist_pool.SpecialistPool, diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py index 56de21b988..bf7e0209d7 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py @@ -72,10 +72,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -83,6 +83,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -92,44 +95,41 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.create_specialist_pool: gapic_v1.method.wrap_method( self.create_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_specialist_pool: gapic_v1.method.wrap_method( - self.get_specialist_pool, default_timeout=None, client_info=client_info, + self.get_specialist_pool, default_timeout=5.0, client_info=client_info, ), self.list_specialist_pools: gapic_v1.method.wrap_method( self.list_specialist_pools, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_specialist_pool: gapic_v1.method.wrap_method( self.delete_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.update_specialist_pool: gapic_v1.method.wrap_method( self.update_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py index cb8904bc07..97bb19e261 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py @@ -114,7 +114,10 
@@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -122,70 +125,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -193,18 +176,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -218,7 +191,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -253,7 +226,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py index 566d0b022b..fd7766a767 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -69,7 +69,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -147,10 +147,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -159,7 +159,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -167,70 +170,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -238,18 +221,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1/types/annotation.py b/google/cloud/aiplatform_v1/types/annotation.py index 000ca49dcb..46b3eea8b5 100644 --- a/google/cloud/aiplatform_v1/types/annotation.py +++ b/google/cloud/aiplatform_v1/types/annotation.py @@ -38,17 +38,17 @@ class Annotation(proto.Message): payload_schema_uri (str): Required. Google Cloud Storage URI points to a YAML file describing - ``payload``. + [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is defined as an `OpenAPI 3.0.2 Schema Object `__. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's - ``metadata``. + [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]. payload (google.protobuf.struct_pb2.Value): Required. The schema of the payload can be found in - ``payload_schema``. + [payload_schema][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this Annotation was created. 
diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index d2d8f02203..52be77e3b8 100644 --- a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -38,7 +38,7 @@ class BatchPredictionJob(proto.Message): r"""A job that uses a - ``Model`` to + [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions on multiple [input instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. If predictions for significant portion of the instances fail, the @@ -64,33 +64,33 @@ class BatchPredictionJob(proto.Message): may be specified via the [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. model_parameters (google.protobuf.struct_pb2.Value): The parameters that govern the predictions. The schema of the parameters may be specified via the [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. output_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputConfig): Required. The Configuration specifying where output predictions should be written. The schema of any single prediction may be specified as a concatenation of [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - ``instance_schema_uri`` + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] and - ``prediction_schema_uri``. + [prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri]. 
dedicated_resources (google.cloud.aiplatform_v1.types.BatchDedicatedResources): The config of resources used by the Model during the batch prediction. If the Model - ``supports`` + [supports][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types] DEDICATED_RESOURCES this config may be provided (and the job will use these resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. manual_batch_tuning_parameters (google.cloud.aiplatform_v1.types.ManualBatchTuningParameters): Immutable. Parameters configuring the batch behavior. Currently only applicable when - ``dedicated_resources`` + [dedicated_resources][google.cloud.aiplatform.v1.BatchPredictionJob.dedicated_resources] are used (in other cases AI Platform does the tuning itself). output_info (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputInfo): @@ -150,9 +150,9 @@ class BatchPredictionJob(proto.Message): class InputConfig(proto.Message): r"""Configures the input to - ``BatchPredictionJob``. + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. See - ``Model.supported_input_storage_formats`` + [Model.supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] for Model's supported input formats, and how instances should be expressed via any of them. @@ -171,7 +171,7 @@ class InputConfig(proto.Message): Required. The format in which instances are given, must be one of the [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - ``supported_input_storage_formats``. + [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats]. """ gcs_source = proto.Field( @@ -186,9 +186,9 @@ class InputConfig(proto.Message): class OutputConfig(proto.Message): r"""Configures the output of - ``BatchPredictionJob``. + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. 
See - ``Model.supported_output_storage_formats`` + [Model.supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats] for supported output formats, and how predictions are expressed via any of them. @@ -203,15 +203,15 @@ class OutputConfig(proto.Message): ``predictions_0002.``, ..., ``predictions_N.`` are created where ```` depends on chosen - ``predictions_format``, + [predictions_format][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.predictions_format], and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both - ``instance`` + [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] and - ``prediction`` + [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] schemata defined then each such file contains predictions as per the - ``predictions_format``. + [predictions_format][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.predictions_format]. If prediction for any instance failed (partially or completely), then an additional ``errors_0001.``, ``errors_0002.``,..., ``errors_N.`` @@ -230,9 +230,9 @@ class OutputConfig(proto.Message): YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, ``predictions``, and ``errors``. If the Model has both - ``instance`` + [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] and - ``prediction`` + [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] schemata defined then the tables have columns as follows: The ``predictions`` table contains instances for which the prediction succeeded, it has columns as per a concatenation @@ -247,7 +247,7 @@ class OutputConfig(proto.Message): predictions, must be one of the [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - ``supported_output_storage_formats``. 
+ [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. """ gcs_destination = proto.Field( @@ -265,7 +265,7 @@ class OutputConfig(proto.Message): class OutputInfo(proto.Message): r"""Further describes this job's output. Supplements - ``output_config``. + [output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. Attributes: gcs_output_directory (str): diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index c97cba6d82..ec0dbf3892 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -151,7 +151,7 @@ class CustomJobSpec(proto.Message): CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of - name ``id`` under its + name [id][google.cloud.aiplatform.v1.Trial.id] under its parent HyperparameterTuningJob's baseOutputDirectory. The following AI Platform environment variables will be diff --git a/google/cloud/aiplatform_v1/types/data_labeling_job.py b/google/cloud/aiplatform_v1/types/data_labeling_job.py index e1058737bf..414aa231ec 100644 --- a/google/cloud/aiplatform_v1/types/data_labeling_job.py +++ b/google/cloud/aiplatform_v1/types/data_labeling_job.py @@ -120,7 +120,7 @@ class DataLabelingJob(proto.Message): - "aiplatform.googleapis.com/schema": output only, its value is the - ``inputs_schema``'s + [inputs_schema][google.cloud.aiplatform.v1.DataLabelingJob.inputs_schema_uri]'s title. 
specialist_pools (Sequence[str]): The SpecialistPools' resource names diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py index 2f75dce0d5..97d244caf4 100644 --- a/google/cloud/aiplatform_v1/types/dataset.py +++ b/google/cloud/aiplatform_v1/types/dataset.py @@ -131,7 +131,7 @@ class ImportDataConfig(proto.Message): if their content bytes are identical (e.g. image bytes or pdf bytes). These labels will be overridden by Annotation labels specified inside index file referenced by - ``import_schema_uri``, + [import_schema_uri][google.cloud.aiplatform.v1.ImportDataConfig.import_schema_uri], e.g. jsonl file. import_schema_uri (str): Required. Points to a YAML file stored on Google Cloud @@ -172,7 +172,7 @@ class ExportDataConfig(proto.Message): to-be-exported DataItems(specified by [data_items_filter][]) that match this filter will be exported. The filter syntax is the same as in - ``ListAnnotations``. + [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. """ gcs_destination = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/dataset_service.py b/google/cloud/aiplatform_v1/types/dataset_service.py index ccc8cce600..c02abc82ca 100644 --- a/google/cloud/aiplatform_v1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1/types/dataset_service.py @@ -52,7 +52,7 @@ class CreateDatasetRequest(proto.Message): r"""Request message for - ``DatasetService.CreateDataset``. + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. Attributes: parent (str): @@ -70,7 +70,7 @@ class CreateDatasetRequest(proto.Message): class CreateDatasetOperationMetadata(proto.Message): r"""Runtime operation information for - ``DatasetService.CreateDataset``. + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. 
Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): @@ -84,7 +84,7 @@ class CreateDatasetOperationMetadata(proto.Message): class GetDatasetRequest(proto.Message): r"""Request message for - ``DatasetService.GetDataset``. + [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. Attributes: name (str): @@ -100,7 +100,7 @@ class GetDatasetRequest(proto.Message): class UpdateDatasetRequest(proto.Message): r"""Request message for - ``DatasetService.UpdateDataset``. + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. Attributes: dataset (google.cloud.aiplatform_v1.types.Dataset): @@ -124,7 +124,7 @@ class UpdateDatasetRequest(proto.Message): class ListDatasetsRequest(proto.Message): r"""Request message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. Attributes: parent (str): @@ -178,7 +178,7 @@ class ListDatasetsRequest(proto.Message): class ListDatasetsResponse(proto.Message): r"""Response message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. Attributes: datasets (Sequence[google.cloud.aiplatform_v1.types.Dataset]): @@ -201,7 +201,7 @@ def raw_page(self): class DeleteDatasetRequest(proto.Message): r"""Request message for - ``DatasetService.DeleteDataset``. + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. Attributes: name (str): @@ -215,7 +215,7 @@ class DeleteDatasetRequest(proto.Message): class ImportDataRequest(proto.Message): r"""Request message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. Attributes: name (str): @@ -236,13 +236,13 @@ class ImportDataRequest(proto.Message): class ImportDataResponse(proto.Message): r"""Response message for - ``DatasetService.ImportData``. 
+ [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. """ class ImportDataOperationMetadata(proto.Message): r"""Runtime operation information for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): @@ -256,7 +256,7 @@ class ImportDataOperationMetadata(proto.Message): class ExportDataRequest(proto.Message): r"""Request message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. Attributes: name (str): @@ -275,7 +275,7 @@ class ExportDataRequest(proto.Message): class ExportDataResponse(proto.Message): r"""Response message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. Attributes: exported_files (Sequence[str]): @@ -288,7 +288,7 @@ class ExportDataResponse(proto.Message): class ExportDataOperationMetadata(proto.Message): r"""Runtime operation information for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): @@ -308,7 +308,7 @@ class ExportDataOperationMetadata(proto.Message): class ListDataItemsRequest(proto.Message): r"""Request message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. Attributes: parent (str): @@ -344,7 +344,7 @@ class ListDataItemsRequest(proto.Message): class ListDataItemsResponse(proto.Message): r"""Response message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. 
Attributes: data_items (Sequence[google.cloud.aiplatform_v1.types.DataItem]): @@ -367,7 +367,7 @@ def raw_page(self): class GetAnnotationSpecRequest(proto.Message): r"""Request message for - ``DatasetService.GetAnnotationSpec``. + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. Attributes: name (str): @@ -385,7 +385,7 @@ class GetAnnotationSpecRequest(proto.Message): class ListAnnotationsRequest(proto.Message): r"""Request message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. Attributes: parent (str): @@ -422,7 +422,7 @@ class ListAnnotationsRequest(proto.Message): class ListAnnotationsResponse(proto.Message): r"""Response message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. Attributes: annotations (Sequence[google.cloud.aiplatform_v1.types.Annotation]): diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py index 5cbe3c1b1d..e2ceb4f7e3 100644 --- a/google/cloud/aiplatform_v1/types/endpoint.py +++ b/google/cloud/aiplatform_v1/types/endpoint.py @@ -45,9 +45,9 @@ class Endpoint(proto.Message): deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModel]): Output only. The models deployed in this Endpoint. To add or remove DeployedModels use - ``EndpointService.DeployModel`` + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel] and - ``EndpointService.UndeployModel`` + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel] respectively. 
traffic_split (Sequence[google.cloud.aiplatform_v1.types.Endpoint.TrafficSplitEntry]): A map from a DeployedModel's ID to the diff --git a/google/cloud/aiplatform_v1/types/endpoint_service.py b/google/cloud/aiplatform_v1/types/endpoint_service.py index 24e00bd486..fd3f3b4c03 100644 --- a/google/cloud/aiplatform_v1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/endpoint_service.py @@ -45,7 +45,7 @@ class CreateEndpointRequest(proto.Message): r"""Request message for - ``EndpointService.CreateEndpoint``. + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. Attributes: parent (str): @@ -63,7 +63,7 @@ class CreateEndpointRequest(proto.Message): class CreateEndpointOperationMetadata(proto.Message): r"""Runtime operation information for - ``EndpointService.CreateEndpoint``. + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): @@ -77,7 +77,7 @@ class CreateEndpointOperationMetadata(proto.Message): class GetEndpointRequest(proto.Message): r"""Request message for - ``EndpointService.GetEndpoint`` + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] Attributes: name (str): @@ -90,7 +90,7 @@ class GetEndpointRequest(proto.Message): class ListEndpointsRequest(proto.Message): r"""Request message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. Attributes: parent (str): @@ -124,9 +124,9 @@ class ListEndpointsRequest(proto.Message): page_token (str): Optional. The standard list page token. 
Typically obtained via - ``ListEndpointsResponse.next_page_token`` + [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1.ListEndpointsResponse.next_page_token] of the previous - ``EndpointService.ListEndpoints`` + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Optional. Mask specifying which fields to @@ -158,14 +158,14 @@ class ListEndpointsRequest(proto.Message): class ListEndpointsResponse(proto.Message): r"""Response message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. Attributes: endpoints (Sequence[google.cloud.aiplatform_v1.types.Endpoint]): List of Endpoints in the requested page. next_page_token (str): A token to retrieve the next page of results. Pass to - ``ListEndpointsRequest.page_token`` + [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1.ListEndpointsRequest.page_token] to obtain that page. """ @@ -182,7 +182,7 @@ def raw_page(self): class UpdateEndpointRequest(proto.Message): r"""Request message for - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. Attributes: endpoint (google.cloud.aiplatform_v1.types.Endpoint): @@ -200,7 +200,7 @@ class UpdateEndpointRequest(proto.Message): class DeleteEndpointRequest(proto.Message): r"""Request message for - ``EndpointService.DeleteEndpoint``. + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. Attributes: name (str): @@ -214,7 +214,7 @@ class DeleteEndpointRequest(proto.Message): class DeployModelRequest(proto.Message): r"""Request message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. 
Attributes: endpoint (str): @@ -224,17 +224,17 @@ class DeployModelRequest(proto.Message): deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): Required. The DeployedModel to be created within the Endpoint. Note that - ``Endpoint.traffic_split`` + [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If this field is non-empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its place by @@ -242,7 +242,7 @@ class DeployModelRequest(proto.Message): 100. If this field is empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] is not updated. """ @@ -257,7 +257,7 @@ class DeployModelRequest(proto.Message): class DeployModelResponse(proto.Message): r"""Response message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. Attributes: deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): @@ -272,7 +272,7 @@ class DeployModelResponse(proto.Message): class DeployModelOperationMetadata(proto.Message): r"""Runtime operation information for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. 
Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): @@ -286,7 +286,7 @@ class DeployModelOperationMetadata(proto.Message): class UndeployModelRequest(proto.Message): r"""Request message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. Attributes: endpoint (str): @@ -298,7 +298,7 @@ class UndeployModelRequest(proto.Message): undeployed from the Endpoint. traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]): If this field is provided, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when this call returns. A @@ -316,13 +316,13 @@ class UndeployModelRequest(proto.Message): class UndeployModelResponse(proto.Message): r"""Response message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. """ class UndeployModelOperationMetadata(proto.Message): r"""Runtime operation information for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): diff --git a/google/cloud/aiplatform_v1/types/job_service.py b/google/cloud/aiplatform_v1/types/job_service.py index 3a6d844ea7..b48fcfbf08 100644 --- a/google/cloud/aiplatform_v1/types/job_service.py +++ b/google/cloud/aiplatform_v1/types/job_service.py @@ -62,7 +62,7 @@ class CreateCustomJobRequest(proto.Message): r"""Request message for - ``JobService.CreateCustomJob``. + [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. 
Attributes: parent (str): @@ -80,7 +80,7 @@ class CreateCustomJobRequest(proto.Message): class GetCustomJobRequest(proto.Message): r"""Request message for - ``JobService.GetCustomJob``. + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. Attributes: name (str): @@ -93,7 +93,7 @@ class GetCustomJobRequest(proto.Message): class ListCustomJobsRequest(proto.Message): r"""Request message for - ``JobService.ListCustomJobs``. + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. Attributes: parent (str): @@ -122,9 +122,9 @@ class ListCustomJobsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListCustomJobsResponse.next_page_token`` + [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListCustomJobsResponse.next_page_token] of the previous - ``JobService.ListCustomJobs`` + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -143,14 +143,14 @@ class ListCustomJobsRequest(proto.Message): class ListCustomJobsResponse(proto.Message): r"""Response message for - ``JobService.ListCustomJobs`` + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] Attributes: custom_jobs (Sequence[google.cloud.aiplatform_v1.types.CustomJob]): List of CustomJobs in the requested page. next_page_token (str): A token to retrieve the next page of results. Pass to - ``ListCustomJobsRequest.page_token`` + [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1.ListCustomJobsRequest.page_token] to obtain that page. """ @@ -167,7 +167,7 @@ def raw_page(self): class DeleteCustomJobRequest(proto.Message): r"""Request message for - ``JobService.DeleteCustomJob``. + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. 
Attributes: name (str): @@ -181,7 +181,7 @@ class DeleteCustomJobRequest(proto.Message): class CancelCustomJobRequest(proto.Message): r"""Request message for - ``JobService.CancelCustomJob``. + [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. Attributes: name (str): @@ -280,7 +280,7 @@ class ListDataLabelingJobsRequest(proto.Message): class ListDataLabelingJobsResponse(proto.Message): r"""Response message for - ``JobService.ListDataLabelingJobs``. + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. Attributes: data_labeling_jobs (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob]): @@ -303,7 +303,7 @@ def raw_page(self): class DeleteDataLabelingJobRequest(proto.Message): r"""Request message for - ``JobService.DeleteDataLabelingJob``. + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. Attributes: name (str): @@ -332,7 +332,7 @@ class CancelDataLabelingJobRequest(proto.Message): class CreateHyperparameterTuningJobRequest(proto.Message): r"""Request message for - ``JobService.CreateHyperparameterTuningJob``. + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. Attributes: parent (str): @@ -355,7 +355,7 @@ class CreateHyperparameterTuningJobRequest(proto.Message): class GetHyperparameterTuningJobRequest(proto.Message): r"""Request message for - ``JobService.GetHyperparameterTuningJob``. + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. Attributes: name (str): @@ -370,7 +370,7 @@ class GetHyperparameterTuningJobRequest(proto.Message): class ListHyperparameterTuningJobsRequest(proto.Message): r"""Request message for - ``JobService.ListHyperparameterTuningJobs``. + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. 
Attributes: parent (str): @@ -399,9 +399,9 @@ class ListHyperparameterTuningJobsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListHyperparameterTuningJobsResponse.next_page_token`` + [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListHyperparameterTuningJobsResponse.next_page_token] of the previous - ``JobService.ListHyperparameterTuningJobs`` + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -420,16 +420,16 @@ class ListHyperparameterTuningJobsRequest(proto.Message): class ListHyperparameterTuningJobsResponse(proto.Message): r"""Response message for - ``JobService.ListHyperparameterTuningJobs`` + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] Attributes: hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob]): List of HyperparameterTuningJobs in the requested page. - ``HyperparameterTuningJob.trials`` + [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1.HyperparameterTuningJob.trials] of the jobs will be not be returned. next_page_token (str): A token to retrieve the next page of results. Pass to - ``ListHyperparameterTuningJobsRequest.page_token`` + [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1.ListHyperparameterTuningJobsRequest.page_token] to obtain that page. """ @@ -448,7 +448,7 @@ def raw_page(self): class DeleteHyperparameterTuningJobRequest(proto.Message): r"""Request message for - ``JobService.DeleteHyperparameterTuningJob``. + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. 
Attributes: name (str): @@ -463,7 +463,7 @@ class DeleteHyperparameterTuningJobRequest(proto.Message): class CancelHyperparameterTuningJobRequest(proto.Message): r"""Request message for - ``JobService.CancelHyperparameterTuningJob``. + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. Attributes: name (str): @@ -478,7 +478,7 @@ class CancelHyperparameterTuningJobRequest(proto.Message): class CreateBatchPredictionJobRequest(proto.Message): r"""Request message for - ``JobService.CreateBatchPredictionJob``. + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. Attributes: parent (str): @@ -498,7 +498,7 @@ class CreateBatchPredictionJobRequest(proto.Message): class GetBatchPredictionJobRequest(proto.Message): r"""Request message for - ``JobService.GetBatchPredictionJob``. + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. Attributes: name (str): @@ -513,7 +513,7 @@ class GetBatchPredictionJobRequest(proto.Message): class ListBatchPredictionJobsRequest(proto.Message): r"""Request message for - ``JobService.ListBatchPredictionJobs``. + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. Attributes: parent (str): @@ -542,9 +542,9 @@ class ListBatchPredictionJobsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListBatchPredictionJobsResponse.next_page_token`` + [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListBatchPredictionJobsResponse.next_page_token] of the previous - ``JobService.ListBatchPredictionJobs`` + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. 
@@ -563,7 +563,7 @@ class ListBatchPredictionJobsRequest(proto.Message): class ListBatchPredictionJobsResponse(proto.Message): r"""Response message for - ``JobService.ListBatchPredictionJobs`` + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] Attributes: batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob]): @@ -571,7 +571,7 @@ class ListBatchPredictionJobsResponse(proto.Message): page. next_page_token (str): A token to retrieve the next page of results. Pass to - ``ListBatchPredictionJobsRequest.page_token`` + [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1.ListBatchPredictionJobsRequest.page_token] to obtain that page. """ @@ -588,7 +588,7 @@ def raw_page(self): class DeleteBatchPredictionJobRequest(proto.Message): r"""Request message for - ``JobService.DeleteBatchPredictionJob``. + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. Attributes: name (str): @@ -603,7 +603,7 @@ class DeleteBatchPredictionJobRequest(proto.Message): class CancelBatchPredictionJobRequest(proto.Message): r"""Request message for - ``JobService.CancelBatchPredictionJob``. + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. Attributes: name (str): diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py index f6864eb798..d828052afc 100644 --- a/google/cloud/aiplatform_v1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1/types/machine_resources.py @@ -46,17 +46,17 @@ class MachineSpec(proto.Message): see https://tinyurl.com/aip-docs/training/configure-compute. For - ``DeployedModel`` + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] this field is optional, and the default value is ``n1-standard-2``. 
For - ``BatchPredictionJob`` + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob] or as part of - ``WorkerPoolSpec`` + [WorkerPoolSpec][google.cloud.aiplatform.v1.WorkerPoolSpec] this field is required. accelerator_type (google.cloud.aiplatform_v1.types.AcceleratorType): Immutable. The type of accelerator(s) that may be attached to the machine as per - ``accelerator_count``. + [accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]. accelerator_count (int): The number of accelerators to attach to the machine. @@ -86,10 +86,10 @@ class DedicatedResources(proto.Message): against it increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. Note: if - ``machine_spec.accelerator_count`` + [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count] is above 0, currently the model will be always deployed precisely on - ``min_replica_count``. + [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count]. max_replica_count (int): Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If @@ -100,7 +100,7 @@ class DedicatedResources(proto.Message): beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use - ``min_replica_count`` + [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count] as the default value. """ @@ -122,7 +122,7 @@ class AutomaticResources(proto.Message): Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to - ``max_replica_count``, + [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count], and as traffic decreases, some of these extra replicas may be freed. 
If the requested value is too large, the deployment will error. @@ -161,7 +161,7 @@ class BatchDedicatedResources(proto.Message): Immutable. The number of machine replicas used at the start of the batch operation. If not set, AI Platform decides starting number, not greater than - ``max_replica_count`` + [max_replica_count][google.cloud.aiplatform.v1.BatchDedicatedResources.max_replica_count] max_replica_count (int): Immutable. The maximum number of machine replicas the batch operation may be scaled to. diff --git a/google/cloud/aiplatform_v1/types/migration_service.py b/google/cloud/aiplatform_v1/types/migration_service.py index acd69b37b4..ec2dbd6bc8 100644 --- a/google/cloud/aiplatform_v1/types/migration_service.py +++ b/google/cloud/aiplatform_v1/types/migration_service.py @@ -41,7 +41,7 @@ class SearchMigratableResourcesRequest(proto.Message): r"""Request message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. Attributes: parent (str): @@ -85,7 +85,7 @@ class SearchMigratableResourcesRequest(proto.Message): class SearchMigratableResourcesResponse(proto.Message): r"""Response message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. Attributes: migratable_resources (Sequence[google.cloud.aiplatform_v1.types.MigratableResource]): @@ -110,7 +110,7 @@ def raw_page(self): class BatchMigrateResourcesRequest(proto.Message): r"""Request message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. 
Attributes: parent (str): @@ -288,7 +288,7 @@ class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message): class BatchMigrateResourcesResponse(proto.Message): r"""Response message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. Attributes: migrate_resource_responses (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceResponse]): @@ -325,7 +325,7 @@ class MigrateResourceResponse(proto.Message): class BatchMigrateResourcesOperationMetadata(proto.Message): r"""Runtime operation information for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): @@ -337,7 +337,7 @@ class BatchMigrateResourcesOperationMetadata(proto.Message): class PartialResult(proto.Message): r"""Represents a partial result in batch migration operation for one - ``MigrateResourceRequest``. + [MigrateResourceRequest][google.cloud.aiplatform.v1.MigrateResourceRequest]. Attributes: error (google.rpc.status_pb2.Status): diff --git a/google/cloud/aiplatform_v1/types/model.py b/google/cloud/aiplatform_v1/types/model.py index c2db797b98..7a2f1cf0dd 100644 --- a/google/cloud/aiplatform_v1/types/model.py +++ b/google/cloud/aiplatform_v1/types/model.py @@ -46,7 +46,7 @@ class Model(proto.Message): predict_schemata (google.cloud.aiplatform_v1.types.PredictSchemata): The schemata that describe formats of the Model's predictions and explanations as given and returned via - ``PredictionService.Predict`` + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] and [PredictionService.Explain][]. metadata_schema_uri (str): Immutable. Points to a YAML file stored on Google Cloud @@ -64,7 +64,7 @@ class Model(proto.Message): metadata (google.protobuf.struct_pb2.Value): Immutable. 
An additional information about the Model; the schema of the metadata can be found in - ``metadata_schema``. + [metadata_schema][google.cloud.aiplatform.v1.Model.metadata_schema_uri]. Unset if the Model does not have any additional information. supported_export_formats (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat]): Output only. The formats in which this Model @@ -78,7 +78,7 @@ class Model(proto.Message): Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon - ``ModelService.UploadModel``, + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], and all binaries it contains are copied and stored internally by AI Platform. Not present for AutoML Models. artifact_uri (str): @@ -89,71 +89,71 @@ class Model(proto.Message): Output only. When this Model is deployed, its prediction resources are described by the ``prediction_resources`` field of the - ``Endpoint.deployed_models`` + [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an - ``Endpoint`` and does not + [Endpoint][google.cloud.aiplatform.v1.Endpoint] and does not support online predictions - (``PredictionService.Predict`` + ([PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] or [PredictionService.Explain][]). Such a Model can serve predictions by using a - ``BatchPredictionJob``, + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob], if it has at least one entry each in - ``supported_input_storage_formats`` + [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] and - ``supported_output_storage_formats``. 
+ [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. supported_input_storage_formats (Sequence[str]): Output only. The formats this Model supports in - ``BatchPredictionJob.input_config``. + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. If - ``PredictSchemata.instance_schema_uri`` + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] exists, the instances should be given as per that schema. The possible formats are: - ``jsonl`` The JSON Lines format, where each instance is a single line. Uses - ``GcsSource``. + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - ``csv`` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses - ``GcsSource``. + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - ``tf-record`` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses - ``GcsSource``. + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - ``tf-record-gzip`` Similar to ``tf-record``, but the file is gzipped. Uses - ``GcsSource``. + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - ``bigquery`` Each instance is a single row in BigQuery. Uses - ``BigQuerySource``. + [BigQuerySource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.bigquery_source]. - ``file-list`` Each line of the file is the location of an instance to process, uses ``gcs_source`` field of the - ``InputConfig`` + [InputConfig][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig] object. If this Model doesn't support any of these formats it means it cannot be used with a - ``BatchPredictionJob``. + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. 
However, if it has - ``supported_deployment_resources_types``, + [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], it could serve online predictions by using - ``PredictionService.Predict`` + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] or [PredictionService.Explain][]. supported_output_storage_formats (Sequence[str]): Output only. The formats this Model supports in - ``BatchPredictionJob.output_config``. + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. If both - ``PredictSchemata.instance_schema_uri`` + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] and - ``PredictSchemata.prediction_schema_uri`` + [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri] exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction @@ -163,25 +163,25 @@ class Model(proto.Message): - ``jsonl`` The JSON Lines format, where each prediction is a single line. Uses - ``GcsDestination``. + [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. - ``csv`` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses - ``GcsDestination``. + [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. - ``bigquery`` Each prediction is a single row in a BigQuery table, uses - ``BigQueryDestination`` + [BigQueryDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.bigquery_destination] . If this Model doesn't support any of these formats it means it cannot be used with a - ``BatchPredictionJob``. 
+ [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. However, if it has - ``supported_deployment_resources_types``, + [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], it could serve online predictions by using - ``PredictionService.Predict`` + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] or [PredictionService.Explain][]. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this Model was @@ -313,18 +313,18 @@ class ExportableContent(proto.Enum): class PredictSchemata(proto.Message): r"""Contains the schemata used in Model's predictions and explanations via - ``PredictionService.Predict``, + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict], [PredictionService.Explain][] and - ``BatchPredictionJob``. + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. Attributes: instance_schema_uri (str): Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in - ``PredictRequest.instances``, + [PredictRequest.instances][google.cloud.aiplatform.v1.PredictRequest.instances], [ExplainRequest.instances][] and - ``BatchPredictionJob.input_config``. + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. AutoML Models always have this field populated by AI @@ -336,9 +336,9 @@ class PredictSchemata(proto.Message): Immutable. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via - ``PredictRequest.parameters``, + [PredictRequest.parameters][google.cloud.aiplatform.v1.PredictRequest.parameters], [ExplainRequest.parameters][] and - ``BatchPredictionJob.model_parameters``. 
+ [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1.BatchPredictionJob.model_parameters]. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. AutoML Models always have this field populated by AI @@ -351,9 +351,9 @@ class PredictSchemata(proto.Message): Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via - ``PredictResponse.predictions``, + [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions], [ExplainResponse.explanations][], and - ``BatchPredictionJob.output_config``. + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. AutoML Models always have this field populated by AI @@ -386,7 +386,7 @@ class ModelContainerSpec(proto.Message): `here `__. The container image is ingested upon - ``ModelService.UploadModel``, + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], stored internally, and this original path is afterwards not used. @@ -403,7 +403,7 @@ class ModelContainerSpec(proto.Message): If you do not specify this field, then the container's ``ENTRYPOINT`` runs, in conjunction with the - ``args`` + [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] field or the container's ```CMD`` `__, if either exists. If this field is not specified and the @@ -423,7 +423,7 @@ class ModelContainerSpec(proto.Message): by AI Platform `__ and environment variables set in the - ``env`` + [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: @@ -443,7 +443,7 @@ class ModelContainerSpec(proto.Message): similar to a Docker ``CMD``'s "default parameters" form. 
If you don't specify this field but do specify the - ``command`` + [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] field, then the command from the ``command`` field runs without any additional arguments. See the `Kubernetes documentation `__ about how @@ -461,7 +461,7 @@ class ModelContainerSpec(proto.Message): by AI Platform `__ and environment variables set in the - ``env`` + [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: @@ -479,9 +479,9 @@ class ModelContainerSpec(proto.Message): in the container can read these environment variables. Additionally, the - ``command`` + [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] and - ``args`` + [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] fields can reference these variables. Later entries in this list can also reference earlier entries. For example, the following example sets the variable ``VAR_2`` to have the @@ -532,7 +532,7 @@ class ModelContainerSpec(proto.Message): predict_route (str): Immutable. HTTP path on the container to send prediction requests to. AI Platform forwards requests sent using - ``projects.locations.endpoints.predict`` + [projects.locations.endpoints.predict][google.cloud.aiplatform.v1.PredictionService.Predict] to this path on the container's IP address and port. AI Platform then returns the container's response in the API response. @@ -542,7 +542,7 @@ class ModelContainerSpec(proto.Message): request body in a POST request to the ``/foo`` path on the port of your container specified by the first value of this ``ModelContainerSpec``'s - ``ports`` + [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] field. If you don't specify this field, it defaults to the @@ -559,7 +559,7 @@ class ModelContainerSpec(proto.Message): environment variable.) 
- DEPLOYED_MODEL: - ``DeployedModel.id`` + [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] of the ``DeployedModel``. (AI Platform makes this value available to your container code as the ```AIP_DEPLOYED_MODEL_ID`` environment @@ -575,7 +575,7 @@ class ModelContainerSpec(proto.Message): Platform intermittently sends a GET request to the ``/bar`` path on the port of your container specified by the first value of this ``ModelContainerSpec``'s - ``ports`` + [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] field. If you don't specify this field, it defaults to the @@ -592,7 +592,7 @@ class ModelContainerSpec(proto.Message): environment variable.) - DEPLOYED_MODEL: - ``DeployedModel.id`` + [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] of the ``DeployedModel``. (AI Platform makes this value available to your container code as the ```AIP_DEPLOYED_MODEL_ID`` `__ diff --git a/google/cloud/aiplatform_v1/types/model_evaluation.py b/google/cloud/aiplatform_v1/types/model_evaluation.py index f617f3d197..1d3502079f 100644 --- a/google/cloud/aiplatform_v1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1/types/model_evaluation.py @@ -39,23 +39,23 @@ class ModelEvaluation(proto.Message): metrics_schema_uri (str): Output only. Points to a YAML file stored on Google Cloud Storage describing the - ``metrics`` + [metrics][google.cloud.aiplatform.v1.ModelEvaluation.metrics] of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. metrics (google.protobuf.struct_pb2.Value): Output only. Evaluation metrics of the Model. The schema of the metrics is stored in - ``metrics_schema_uri`` + [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluation.metrics_schema_uri] create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this ModelEvaluation was created. slice_dimensions (Sequence[str]): Output only. 
All possible - ``dimensions`` of + [dimensions][ModelEvaluationSlice.slice.dimension] of ModelEvaluationSlices. The dimensions can be used as the filter of the - ``ModelService.ListModelEvaluationSlices`` + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] request, in the form of ``slice.dimension = ``. """ diff --git a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py index 5653c3d2b6..5a9e0268a5 100644 --- a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py +++ b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py @@ -42,14 +42,14 @@ class ModelEvaluationSlice(proto.Message): metrics_schema_uri (str): Output only. Points to a YAML file stored on Google Cloud Storage describing the - ``metrics`` + [metrics][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics] of this ModelEvaluationSlice. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. metrics (google.protobuf.struct_pb2.Value): Output only. Sliced evaluation metrics of the Model. The schema of the metrics is stored in - ``metrics_schema_uri`` + [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics_schema_uri] create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this ModelEvaluationSlice was created. @@ -65,9 +65,9 @@ class Slice(proto.Message): - ``annotationSpec``: This slice is on the test data that has either ground truth or prediction with - ``AnnotationSpec.display_name`` + [AnnotationSpec.display_name][google.cloud.aiplatform.v1.AnnotationSpec.display_name] equals to - ``value``. + [value][google.cloud.aiplatform.v1.ModelEvaluationSlice.Slice.value]. value (str): Output only. The value of the dimension in this slice. 
diff --git a/google/cloud/aiplatform_v1/types/model_service.py b/google/cloud/aiplatform_v1/types/model_service.py index 454e014fd5..3cb791a739 100644 --- a/google/cloud/aiplatform_v1/types/model_service.py +++ b/google/cloud/aiplatform_v1/types/model_service.py @@ -52,7 +52,7 @@ class UploadModelRequest(proto.Message): r"""Request message for - ``ModelService.UploadModel``. + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. Attributes: parent (str): @@ -70,7 +70,7 @@ class UploadModelRequest(proto.Message): class UploadModelOperationMetadata(proto.Message): r"""Details of - ``ModelService.UploadModel`` + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] operation. Attributes: @@ -85,7 +85,7 @@ class UploadModelOperationMetadata(proto.Message): class UploadModelResponse(proto.Message): r"""Response message of - ``ModelService.UploadModel`` + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] operation. Attributes: @@ -99,7 +99,7 @@ class UploadModelResponse(proto.Message): class GetModelRequest(proto.Message): r"""Request message for - ``ModelService.GetModel``. + [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. Attributes: name (str): @@ -112,7 +112,7 @@ class GetModelRequest(proto.Message): class ListModelsRequest(proto.Message): r"""Request message for - ``ModelService.ListModels``. + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. Attributes: parent (str): @@ -143,9 +143,9 @@ class ListModelsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListModelsResponse.next_page_token`` + [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelsResponse.next_page_token] of the previous - ``ModelService.ListModels`` + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] call. 
read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -176,14 +176,14 @@ class ListModelsRequest(proto.Message): class ListModelsResponse(proto.Message): r"""Response message for - ``ModelService.ListModels`` + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] Attributes: models (Sequence[google.cloud.aiplatform_v1.types.Model]): List of Models in the requested page. next_page_token (str): A token to retrieve next page of results. Pass to - ``ListModelsRequest.page_token`` + [ListModelsRequest.page_token][google.cloud.aiplatform.v1.ListModelsRequest.page_token] to obtain that page. """ @@ -198,7 +198,7 @@ def raw_page(self): class UpdateModelRequest(proto.Message): r"""Request message for - ``ModelService.UpdateModel``. + [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. Attributes: model (google.cloud.aiplatform_v1.types.Model): @@ -217,7 +217,7 @@ class UpdateModelRequest(proto.Message): class DeleteModelRequest(proto.Message): r"""Request message for - ``ModelService.DeleteModel``. + [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. Attributes: name (str): @@ -231,7 +231,7 @@ class DeleteModelRequest(proto.Message): class ExportModelRequest(proto.Message): r"""Request message for - ``ModelService.ExportModel``. + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. Attributes: name (str): @@ -288,7 +288,7 @@ class OutputConfig(proto.Message): class ExportModelOperationMetadata(proto.Message): r"""Details of - ``ModelService.ExportModel`` + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] operation. Attributes: @@ -301,7 +301,7 @@ class ExportModelOperationMetadata(proto.Message): class OutputInfo(proto.Message): r"""Further describes the output of the ExportModel. Supplements - ``ExportModelRequest.OutputConfig``. 
+ [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1.ExportModelRequest.OutputConfig]. Attributes: artifact_output_uri (str): @@ -329,14 +329,14 @@ class OutputInfo(proto.Message): class ExportModelResponse(proto.Message): r"""Response message of - ``ModelService.ExportModel`` + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] operation. """ class GetModelEvaluationRequest(proto.Message): r"""Request message for - ``ModelService.GetModelEvaluation``. + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. Attributes: name (str): @@ -350,7 +350,7 @@ class GetModelEvaluationRequest(proto.Message): class ListModelEvaluationsRequest(proto.Message): r"""Request message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. Attributes: parent (str): @@ -363,9 +363,9 @@ class ListModelEvaluationsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListModelEvaluationsResponse.next_page_token`` + [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationsResponse.next_page_token] of the previous - ``ModelService.ListModelEvaluations`` + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -384,7 +384,7 @@ class ListModelEvaluationsRequest(proto.Message): class ListModelEvaluationsResponse(proto.Message): r"""Response message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. Attributes: model_evaluations (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluation]): @@ -392,7 +392,7 @@ class ListModelEvaluationsResponse(proto.Message): page. 
next_page_token (str): A token to retrieve next page of results. Pass to - ``ListModelEvaluationsRequest.page_token`` + [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationsRequest.page_token] to obtain that page. """ @@ -409,7 +409,7 @@ def raw_page(self): class GetModelEvaluationSliceRequest(proto.Message): r"""Request message for - ``ModelService.GetModelEvaluationSlice``. + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. Attributes: name (str): @@ -424,7 +424,7 @@ class GetModelEvaluationSliceRequest(proto.Message): class ListModelEvaluationSlicesRequest(proto.Message): r"""Request message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. Attributes: parent (str): @@ -440,9 +440,9 @@ class ListModelEvaluationSlicesRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListModelEvaluationSlicesResponse.next_page_token`` + [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesResponse.next_page_token] of the previous - ``ModelService.ListModelEvaluationSlices`` + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -461,7 +461,7 @@ class ListModelEvaluationSlicesRequest(proto.Message): class ListModelEvaluationSlicesResponse(proto.Message): r"""Response message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. 
Attributes: model_evaluation_slices (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]): @@ -469,7 +469,7 @@ class ListModelEvaluationSlicesResponse(proto.Message): page. next_page_token (str): A token to retrieve next page of results. Pass to - ``ListModelEvaluationSlicesRequest.page_token`` + [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesRequest.page_token] to obtain that page. """ diff --git a/google/cloud/aiplatform_v1/types/pipeline_service.py b/google/cloud/aiplatform_v1/types/pipeline_service.py index b2c6d5bbe3..98e9f6c190 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -37,7 +37,7 @@ class CreateTrainingPipelineRequest(proto.Message): r"""Request message for - ``PipelineService.CreateTrainingPipeline``. + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. Attributes: parent (str): @@ -57,7 +57,7 @@ class CreateTrainingPipelineRequest(proto.Message): class GetTrainingPipelineRequest(proto.Message): r"""Request message for - ``PipelineService.GetTrainingPipeline``. + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. Attributes: name (str): @@ -71,7 +71,7 @@ class GetTrainingPipelineRequest(proto.Message): class ListTrainingPipelinesRequest(proto.Message): r"""Request message for - ``PipelineService.ListTrainingPipelines``. + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. Attributes: parent (str): @@ -98,9 +98,9 @@ class ListTrainingPipelinesRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. 
Typically obtained via - ``ListTrainingPipelinesResponse.next_page_token`` + [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token] of the previous - ``PipelineService.ListTrainingPipelines`` + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -119,7 +119,7 @@ class ListTrainingPipelinesRequest(proto.Message): class ListTrainingPipelinesResponse(proto.Message): r"""Response message for - ``PipelineService.ListTrainingPipelines`` + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] Attributes: training_pipelines (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline]): @@ -127,7 +127,7 @@ class ListTrainingPipelinesResponse(proto.Message): page. next_page_token (str): A token to retrieve the next page of results. Pass to - ``ListTrainingPipelinesRequest.page_token`` + [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesRequest.page_token] to obtain that page. """ @@ -144,7 +144,7 @@ def raw_page(self): class DeleteTrainingPipelineRequest(proto.Message): r"""Request message for - ``PipelineService.DeleteTrainingPipeline``. + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. Attributes: name (str): @@ -159,7 +159,7 @@ class DeleteTrainingPipelineRequest(proto.Message): class CancelTrainingPipelineRequest(proto.Message): r"""Request message for - ``PipelineService.CancelTrainingPipeline``. + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. 
Attributes: name (str): diff --git a/google/cloud/aiplatform_v1/types/prediction_service.py b/google/cloud/aiplatform_v1/types/prediction_service.py index 21a01372f4..d1d3ea3dd3 100644 --- a/google/cloud/aiplatform_v1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1/types/prediction_service.py @@ -29,7 +29,7 @@ class PredictRequest(proto.Message): r"""Request message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. Attributes: endpoint (str): @@ -47,13 +47,13 @@ class PredictRequest(proto.Message): DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. parameters (google.protobuf.struct_pb2.Value): The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. """ endpoint = proto.Field(proto.STRING, number=1) @@ -65,7 +65,7 @@ class PredictRequest(proto.Message): class PredictResponse(proto.Message): r"""Response message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. Attributes: predictions (Sequence[google.protobuf.struct_pb2.Value]): @@ -74,7 +74,7 @@ class PredictResponse(proto.Message): Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - ``prediction_schema_uri``. + [prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri]. 
deployed_model_id (str): ID of the Endpoint's DeployedModel that served this prediction. diff --git a/google/cloud/aiplatform_v1/types/specialist_pool_service.py b/google/cloud/aiplatform_v1/types/specialist_pool_service.py index 69e49bb355..7392d79f01 100644 --- a/google/cloud/aiplatform_v1/types/specialist_pool_service.py +++ b/google/cloud/aiplatform_v1/types/specialist_pool_service.py @@ -40,7 +40,7 @@ class CreateSpecialistPoolRequest(proto.Message): r"""Request message for - ``SpecialistPoolService.CreateSpecialistPool``. + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. Attributes: parent (str): @@ -60,7 +60,7 @@ class CreateSpecialistPoolRequest(proto.Message): class CreateSpecialistPoolOperationMetadata(proto.Message): r"""Runtime operation information for - ``SpecialistPoolService.CreateSpecialistPool``. + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): @@ -74,7 +74,7 @@ class CreateSpecialistPoolOperationMetadata(proto.Message): class GetSpecialistPoolRequest(proto.Message): r"""Request message for - ``SpecialistPoolService.GetSpecialistPool``. + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. Attributes: name (str): @@ -89,7 +89,7 @@ class GetSpecialistPoolRequest(proto.Message): class ListSpecialistPoolsRequest(proto.Message): r"""Request message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. Attributes: parent (str): @@ -99,9 +99,9 @@ class ListSpecialistPoolsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. 
Typically obtained by - ``ListSpecialistPoolsResponse.next_page_token`` + [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1.ListSpecialistPoolsResponse.next_page_token] of the previous - ``SpecialistPoolService.ListSpecialistPools`` + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools] call. Return first page if empty. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -119,7 +119,7 @@ class ListSpecialistPoolsRequest(proto.Message): class ListSpecialistPoolsResponse(proto.Message): r"""Response message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. Attributes: specialist_pools (Sequence[google.cloud.aiplatform_v1.types.SpecialistPool]): @@ -142,7 +142,7 @@ def raw_page(self): class DeleteSpecialistPoolRequest(proto.Message): r"""Request message for - ``SpecialistPoolService.DeleteSpecialistPool``. + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. Attributes: name (str): @@ -163,7 +163,7 @@ class DeleteSpecialistPoolRequest(proto.Message): class UpdateSpecialistPoolRequest(proto.Message): r"""Request message for - ``SpecialistPoolService.UpdateSpecialistPool``. + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. Attributes: specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): @@ -183,7 +183,7 @@ class UpdateSpecialistPoolRequest(proto.Message): class UpdateSpecialistPoolOperationMetadata(proto.Message): r"""Runtime operation metadata for - ``SpecialistPoolService.UpdateSpecialistPool``. + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. 
Attributes: specialist_pool (str): diff --git a/google/cloud/aiplatform_v1/types/training_pipeline.py b/google/cloud/aiplatform_v1/types/training_pipeline.py index 9a41f231a5..0964e87cd4 100644 --- a/google/cloud/aiplatform_v1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1/types/training_pipeline.py @@ -44,7 +44,7 @@ class TrainingPipeline(proto.Message): r"""The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training - input, ``upload`` + input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] the Model to AI Platform, and evaluate the Model. Attributes: @@ -57,11 +57,11 @@ class TrainingPipeline(proto.Message): input_data_config (google.cloud.aiplatform_v1.types.InputDataConfig): Specifies AI Platform owned input data that may be used for training the Model. The TrainingPipeline's - ``training_task_definition`` + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] should make clear whether this config is used and if there are any special requirements on how it should be filled. If nothing about this config is mentioned in the - ``training_task_definition``, + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], then it should be assumed that the TrainingPipeline does not depend on this configuration. training_task_definition (str): @@ -80,27 +80,27 @@ class TrainingPipeline(proto.Message): training_task_inputs (google.protobuf.struct_pb2.Value): Required. The training task's parameter(s), as specified in the - ``training_task_definition``'s + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s ``inputs``. training_task_metadata (google.protobuf.struct_pb2.Value): Output only. 
The metadata information as specified in the - ``training_task_definition``'s + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s ``metadata``. This metadata is an auxiliary runtime and final information about the training task. While the pipeline is running this information is populated only at a best effort basis. Only present if the pipeline's - ``training_task_definition`` + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] contains ``metadata`` object. model_to_upload (google.cloud.aiplatform_v1.types.Model): Describes the Model that may be uploaded (via - ``ModelService.UploadModel``) + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]) by this TrainingPipeline. The TrainingPipeline's - ``training_task_definition`` + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] should make clear whether this Model description should be populated, and if there are any special requirements regarding how it should be filled. If nothing is mentioned in the - ``training_task_definition``, + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], then it should be assumed that this field should not be filled and the training task either uploads the Model without a need of this information, or that training task @@ -108,7 +108,7 @@ class TrainingPipeline(proto.Message): When the Pipeline's state becomes ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been uploaded into AI Platform, then the model_to_upload's - resource ``name`` is + resource [name][google.cloud.aiplatform.v1.Model.name] is populated. The Model is always uploaded into the Project and Location in which this pipeline is. 
state (google.cloud.aiplatform_v1.types.PipelineState): @@ -146,7 +146,7 @@ class TrainingPipeline(proto.Message): Note: Model trained by this TrainingPipeline is also secured by this key if - ``model_to_upload`` + [model_to_upload][google.cloud.aiplatform.v1.TrainingPipeline.encryption_spec] is not set separately. """ @@ -272,7 +272,7 @@ class InputDataConfig(proto.Message): the DataItem they are on (for the auto-assigned that role is decided by AI Platform). A filter with same syntax as the one used in - ``ListAnnotations`` + [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations] may be used, but note here it filters across all Annotations of the Dataset, and not just within a single DataItem. annotation_schema_uri (str): @@ -286,9 +286,9 @@ class InputDataConfig(proto.Message): schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the chosen schema must be consistent with - ``metadata`` + [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the Dataset specified by - ``dataset_id``. + [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id]. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in @@ -296,11 +296,11 @@ class InputDataConfig(proto.Message): the role of the DataItem they are on. When used in conjunction with - ``annotations_filter``, + [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter], the Annotations used for training are filtered by both - ``annotations_filter`` + [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter] and - ``annotation_schema_uri``. + [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]. """ fraction_split = proto.Field( @@ -377,7 +377,7 @@ class FilterSplit(proto.Message): Required. A filter on DataItems of the Dataset. 
DataItems that match this filter are used to train the Model. A filter with same syntax as the one used in - ``DatasetService.ListDataItems`` + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, @@ -386,7 +386,7 @@ class FilterSplit(proto.Message): Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with same syntax as the one used in - ``DatasetService.ListDataItems`` + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, @@ -395,7 +395,7 @@ class FilterSplit(proto.Message): Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with same syntax as the one used in - ``DatasetService.ListDataItems`` + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] may be used. 
If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 621f1e96f8..4ffc71f682 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -17,18 +17,28 @@ from .services.dataset_service import DatasetServiceClient from .services.endpoint_service import EndpointServiceClient +from .services.featurestore_online_serving_service import ( + FeaturestoreOnlineServingServiceClient, +) +from .services.featurestore_service import FeaturestoreServiceClient +from .services.index_endpoint_service import IndexEndpointServiceClient +from .services.index_service import IndexServiceClient from .services.job_service import JobServiceClient +from .services.metadata_service import MetadataServiceClient from .services.migration_service import MigrationServiceClient from .services.model_service import ModelServiceClient from .services.pipeline_service import PipelineServiceClient from .services.prediction_service import PredictionServiceClient from .services.specialist_pool_service import SpecialistPoolServiceClient +from .services.tensorboard_service import TensorboardServiceClient from .services.vizier_service import VizierServiceClient from .types.accelerator_type import AcceleratorType from .types.annotation import Annotation from .types.annotation_spec import AnnotationSpec +from .types.artifact import Artifact from .types.batch_prediction_job import BatchPredictionJob from .types.completion_stats import CompletionStats +from .types.context import Context from .types.custom_job import ContainerSpec from .types.custom_job import CustomJob from .types.custom_job import CustomJobSpec @@ -61,6 +71,7 @@ from .types.dataset_service import ListDatasetsRequest from .types.dataset_service import ListDatasetsResponse from 
.types.dataset_service import UpdateDatasetRequest +from .types.deployed_index_ref import DeployedIndexRef from .types.deployed_model_ref import DeployedModelRef from .types.encryption_spec import EncryptionSpec from .types.endpoint import DeployedModel @@ -78,7 +89,10 @@ from .types.endpoint_service import UndeployModelRequest from .types.endpoint_service import UndeployModelResponse from .types.endpoint_service import UpdateEndpointRequest +from .types.entity_type import EntityType from .types.env_var import EnvVar +from .types.event import Event +from .types.execution import Execution from .types.explanation import Attribution from .types.explanation import Explanation from .types.explanation import ExplanationMetadataOverride @@ -92,12 +106,92 @@ from .types.explanation import SmoothGradConfig from .types.explanation import XraiAttribution from .types.explanation_metadata import ExplanationMetadata +from .types.feature import Feature +from .types.feature_monitoring_stats import FeatureStatsAnomaly +from .types.feature_selector import FeatureSelector +from .types.feature_selector import IdMatcher +from .types.featurestore import Featurestore +from .types.featurestore_monitoring import FeaturestoreMonitoringConfig +from .types.featurestore_online_service import FeatureValue +from .types.featurestore_online_service import FeatureValueList +from .types.featurestore_online_service import ReadFeatureValuesRequest +from .types.featurestore_online_service import ReadFeatureValuesResponse +from .types.featurestore_online_service import StreamingReadFeatureValuesRequest +from .types.featurestore_service import BatchCreateFeaturesOperationMetadata +from .types.featurestore_service import BatchCreateFeaturesRequest +from .types.featurestore_service import BatchCreateFeaturesResponse +from .types.featurestore_service import BatchReadFeatureValuesOperationMetadata +from .types.featurestore_service import BatchReadFeatureValuesRequest +from .types.featurestore_service import 
BatchReadFeatureValuesResponse +from .types.featurestore_service import CreateEntityTypeOperationMetadata +from .types.featurestore_service import CreateEntityTypeRequest +from .types.featurestore_service import CreateFeatureOperationMetadata +from .types.featurestore_service import CreateFeatureRequest +from .types.featurestore_service import CreateFeaturestoreOperationMetadata +from .types.featurestore_service import CreateFeaturestoreRequest +from .types.featurestore_service import DeleteEntityTypeRequest +from .types.featurestore_service import DeleteFeatureRequest +from .types.featurestore_service import DeleteFeaturestoreRequest +from .types.featurestore_service import DestinationFeatureSetting +from .types.featurestore_service import ExportFeatureValuesOperationMetadata +from .types.featurestore_service import ExportFeatureValuesRequest +from .types.featurestore_service import ExportFeatureValuesResponse +from .types.featurestore_service import FeatureValueDestination +from .types.featurestore_service import GetEntityTypeRequest +from .types.featurestore_service import GetFeatureRequest +from .types.featurestore_service import GetFeaturestoreRequest +from .types.featurestore_service import ImportFeatureValuesOperationMetadata +from .types.featurestore_service import ImportFeatureValuesRequest +from .types.featurestore_service import ImportFeatureValuesResponse +from .types.featurestore_service import ListEntityTypesRequest +from .types.featurestore_service import ListEntityTypesResponse +from .types.featurestore_service import ListFeaturesRequest +from .types.featurestore_service import ListFeaturesResponse +from .types.featurestore_service import ListFeaturestoresRequest +from .types.featurestore_service import ListFeaturestoresResponse +from .types.featurestore_service import SearchFeaturesRequest +from .types.featurestore_service import SearchFeaturesResponse +from .types.featurestore_service import UpdateEntityTypeRequest +from 
.types.featurestore_service import UpdateFeatureRequest +from .types.featurestore_service import UpdateFeaturestoreOperationMetadata +from .types.featurestore_service import UpdateFeaturestoreRequest from .types.hyperparameter_tuning_job import HyperparameterTuningJob +from .types.index import Index +from .types.index_endpoint import DeployedIndex +from .types.index_endpoint import DeployedIndexAuthConfig +from .types.index_endpoint import IndexEndpoint +from .types.index_endpoint import IndexPrivateEndpoints +from .types.index_endpoint_service import CreateIndexEndpointOperationMetadata +from .types.index_endpoint_service import CreateIndexEndpointRequest +from .types.index_endpoint_service import DeleteIndexEndpointRequest +from .types.index_endpoint_service import DeployIndexOperationMetadata +from .types.index_endpoint_service import DeployIndexRequest +from .types.index_endpoint_service import DeployIndexResponse +from .types.index_endpoint_service import GetIndexEndpointRequest +from .types.index_endpoint_service import ListIndexEndpointsRequest +from .types.index_endpoint_service import ListIndexEndpointsResponse +from .types.index_endpoint_service import UndeployIndexOperationMetadata +from .types.index_endpoint_service import UndeployIndexRequest +from .types.index_endpoint_service import UndeployIndexResponse +from .types.index_endpoint_service import UpdateIndexEndpointRequest +from .types.index_service import CreateIndexOperationMetadata +from .types.index_service import CreateIndexRequest +from .types.index_service import DeleteIndexRequest +from .types.index_service import GetIndexRequest +from .types.index_service import ListIndexesRequest +from .types.index_service import ListIndexesResponse +from .types.index_service import NearestNeighborSearchOperationMetadata +from .types.index_service import UpdateIndexOperationMetadata +from .types.index_service import UpdateIndexRequest +from .types.io import AvroSource from .types.io import 
BigQueryDestination from .types.io import BigQuerySource from .types.io import ContainerRegistryDestination +from .types.io import CsvDestination +from .types.io import CsvSource from .types.io import GcsDestination from .types.io import GcsSource +from .types.io import TFRecordDestination from .types.job_service import CancelBatchPredictionJobRequest from .types.job_service import CancelCustomJobRequest from .types.job_service import CancelDataLabelingJobRequest @@ -106,14 +200,17 @@ from .types.job_service import CreateCustomJobRequest from .types.job_service import CreateDataLabelingJobRequest from .types.job_service import CreateHyperparameterTuningJobRequest +from .types.job_service import CreateModelDeploymentMonitoringJobRequest from .types.job_service import DeleteBatchPredictionJobRequest from .types.job_service import DeleteCustomJobRequest from .types.job_service import DeleteDataLabelingJobRequest from .types.job_service import DeleteHyperparameterTuningJobRequest +from .types.job_service import DeleteModelDeploymentMonitoringJobRequest from .types.job_service import GetBatchPredictionJobRequest from .types.job_service import GetCustomJobRequest from .types.job_service import GetDataLabelingJobRequest from .types.job_service import GetHyperparameterTuningJobRequest +from .types.job_service import GetModelDeploymentMonitoringJobRequest from .types.job_service import ListBatchPredictionJobsRequest from .types.job_service import ListBatchPredictionJobsResponse from .types.job_service import ListCustomJobsRequest @@ -122,7 +219,16 @@ from .types.job_service import ListDataLabelingJobsResponse from .types.job_service import ListHyperparameterTuningJobsRequest from .types.job_service import ListHyperparameterTuningJobsResponse +from .types.job_service import ListModelDeploymentMonitoringJobsRequest +from .types.job_service import ListModelDeploymentMonitoringJobsResponse +from .types.job_service import PauseModelDeploymentMonitoringJobRequest +from 
.types.job_service import ResumeModelDeploymentMonitoringJobRequest +from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest +from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse +from .types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata +from .types.job_service import UpdateModelDeploymentMonitoringJobRequest from .types.job_state import JobState +from .types.lineage_subgraph import LineageSubgraph from .types.machine_resources import AutomaticResources from .types.machine_resources import AutoscalingMetricSpec from .types.machine_resources import BatchDedicatedResources @@ -131,6 +237,44 @@ from .types.machine_resources import MachineSpec from .types.machine_resources import ResourcesConsumed from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters +from .types.metadata_schema import MetadataSchema +from .types.metadata_service import AddContextArtifactsAndExecutionsRequest +from .types.metadata_service import AddContextArtifactsAndExecutionsResponse +from .types.metadata_service import AddContextChildrenRequest +from .types.metadata_service import AddContextChildrenResponse +from .types.metadata_service import AddExecutionEventsRequest +from .types.metadata_service import AddExecutionEventsResponse +from .types.metadata_service import CreateArtifactRequest +from .types.metadata_service import CreateContextRequest +from .types.metadata_service import CreateExecutionRequest +from .types.metadata_service import CreateMetadataSchemaRequest +from .types.metadata_service import CreateMetadataStoreOperationMetadata +from .types.metadata_service import CreateMetadataStoreRequest +from .types.metadata_service import DeleteContextRequest +from .types.metadata_service import DeleteMetadataStoreOperationMetadata +from .types.metadata_service import DeleteMetadataStoreRequest +from .types.metadata_service import GetArtifactRequest +from .types.metadata_service import 
GetContextRequest +from .types.metadata_service import GetExecutionRequest +from .types.metadata_service import GetMetadataSchemaRequest +from .types.metadata_service import GetMetadataStoreRequest +from .types.metadata_service import ListArtifactsRequest +from .types.metadata_service import ListArtifactsResponse +from .types.metadata_service import ListContextsRequest +from .types.metadata_service import ListContextsResponse +from .types.metadata_service import ListExecutionsRequest +from .types.metadata_service import ListExecutionsResponse +from .types.metadata_service import ListMetadataSchemasRequest +from .types.metadata_service import ListMetadataSchemasResponse +from .types.metadata_service import ListMetadataStoresRequest +from .types.metadata_service import ListMetadataStoresResponse +from .types.metadata_service import QueryArtifactLineageSubgraphRequest +from .types.metadata_service import QueryContextLineageSubgraphRequest +from .types.metadata_service import QueryExecutionInputsAndOutputsRequest +from .types.metadata_service import UpdateArtifactRequest +from .types.metadata_service import UpdateContextRequest +from .types.metadata_service import UpdateExecutionRequest +from .types.metadata_store import MetadataStore from .types.migratable_resource import MigratableResource from .types.migration_service import BatchMigrateResourcesOperationMetadata from .types.migration_service import BatchMigrateResourcesRequest @@ -143,8 +287,26 @@ from .types.model import ModelContainerSpec from .types.model import Port from .types.model import PredictSchemata +from .types.model_deployment_monitoring_job import ( + ModelDeploymentMonitoringBigQueryTable, +) +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob +from .types.model_deployment_monitoring_job import ( + ModelDeploymentMonitoringObjectiveConfig, +) +from .types.model_deployment_monitoring_job import ( + ModelDeploymentMonitoringObjectiveType, +) +from 
.types.model_deployment_monitoring_job import ( + ModelDeploymentMonitoringScheduleConfig, +) +from .types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies from .types.model_evaluation import ModelEvaluation from .types.model_evaluation_slice import ModelEvaluationSlice +from .types.model_monitoring import ModelMonitoringAlertConfig +from .types.model_monitoring import ModelMonitoringObjectiveConfig +from .types.model_monitoring import SamplingStrategy +from .types.model_monitoring import ThresholdConfig from .types.model_service import DeleteModelRequest from .types.model_service import ExportModelOperationMetadata from .types.model_service import ExportModelRequest @@ -164,10 +326,20 @@ from .types.model_service import UploadModelResponse from .types.operation import DeleteOperationMetadata from .types.operation import GenericOperationMetadata +from .types.pipeline_job import PipelineJob +from .types.pipeline_job import PipelineJobDetail +from .types.pipeline_job import PipelineTaskDetail +from .types.pipeline_job import PipelineTaskExecutorDetail +from .types.pipeline_service import CancelPipelineJobRequest from .types.pipeline_service import CancelTrainingPipelineRequest +from .types.pipeline_service import CreatePipelineJobRequest from .types.pipeline_service import CreateTrainingPipelineRequest +from .types.pipeline_service import DeletePipelineJobRequest from .types.pipeline_service import DeleteTrainingPipelineRequest +from .types.pipeline_service import GetPipelineJobRequest from .types.pipeline_service import GetTrainingPipelineRequest +from .types.pipeline_service import ListPipelineJobsRequest +from .types.pipeline_service import ListPipelineJobsResponse from .types.pipeline_service import ListTrainingPipelinesRequest from .types.pipeline_service import ListTrainingPipelinesResponse from .types.pipeline_state import PipelineState @@ -188,13 +360,62 @@ from .types.study import Study from .types.study import StudySpec from .types.study 
import Trial +from .types.tensorboard import Tensorboard +from .types.tensorboard_data import Scalar +from .types.tensorboard_data import TensorboardBlob +from .types.tensorboard_data import TensorboardBlobSequence +from .types.tensorboard_data import TensorboardTensor +from .types.tensorboard_data import TimeSeriesData +from .types.tensorboard_data import TimeSeriesDataPoint +from .types.tensorboard_experiment import TensorboardExperiment +from .types.tensorboard_run import TensorboardRun +from .types.tensorboard_service import CreateTensorboardExperimentRequest +from .types.tensorboard_service import CreateTensorboardOperationMetadata +from .types.tensorboard_service import CreateTensorboardRequest +from .types.tensorboard_service import CreateTensorboardRunRequest +from .types.tensorboard_service import CreateTensorboardTimeSeriesRequest +from .types.tensorboard_service import DeleteTensorboardExperimentRequest +from .types.tensorboard_service import DeleteTensorboardRequest +from .types.tensorboard_service import DeleteTensorboardRunRequest +from .types.tensorboard_service import DeleteTensorboardTimeSeriesRequest +from .types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse +from .types.tensorboard_service import GetTensorboardExperimentRequest +from .types.tensorboard_service import GetTensorboardRequest +from .types.tensorboard_service import GetTensorboardRunRequest +from .types.tensorboard_service import GetTensorboardTimeSeriesRequest +from .types.tensorboard_service import ListTensorboardExperimentsRequest +from .types.tensorboard_service import ListTensorboardExperimentsResponse +from .types.tensorboard_service import ListTensorboardRunsRequest +from .types.tensorboard_service import ListTensorboardRunsResponse +from .types.tensorboard_service import ListTensorboardTimeSeriesRequest +from .types.tensorboard_service import ListTensorboardTimeSeriesResponse 
+from .types.tensorboard_service import ListTensorboardsRequest +from .types.tensorboard_service import ListTensorboardsResponse +from .types.tensorboard_service import ReadTensorboardBlobDataRequest +from .types.tensorboard_service import ReadTensorboardBlobDataResponse +from .types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse +from .types.tensorboard_service import UpdateTensorboardExperimentRequest +from .types.tensorboard_service import UpdateTensorboardOperationMetadata +from .types.tensorboard_service import UpdateTensorboardRequest +from .types.tensorboard_service import UpdateTensorboardRunRequest +from .types.tensorboard_service import UpdateTensorboardTimeSeriesRequest +from .types.tensorboard_service import WriteTensorboardRunDataRequest +from .types.tensorboard_service import WriteTensorboardRunDataResponse +from .types.tensorboard_time_series import TensorboardTimeSeries from .types.training_pipeline import FilterSplit from .types.training_pipeline import FractionSplit from .types.training_pipeline import InputDataConfig from .types.training_pipeline import PredefinedSplit from .types.training_pipeline import TimestampSplit from .types.training_pipeline import TrainingPipeline +from .types.types import BoolArray +from .types.types import DoubleArray +from .types.types import Int64Array +from .types.types import StringArray from .types.user_action_reference import UserActionReference +from .types.value import Value from .types.vizier_service import AddTrialMeasurementRequest from .types.vizier_service import CheckTrialEarlyStoppingStateMetatdata from .types.vizier_service import CheckTrialEarlyStoppingStateRequest @@ -222,23 +443,39 @@ __all__ = ( "AcceleratorType", "ActiveLearningConfig", + "AddContextArtifactsAndExecutionsRequest", + "AddContextArtifactsAndExecutionsResponse", + "AddContextChildrenRequest", + "AddContextChildrenResponse", + 
"AddExecutionEventsRequest", + "AddExecutionEventsResponse", "AddTrialMeasurementRequest", "Annotation", "AnnotationSpec", + "Artifact", "Attribution", "AutomaticResources", "AutoscalingMetricSpec", + "AvroSource", + "BatchCreateFeaturesOperationMetadata", + "BatchCreateFeaturesRequest", + "BatchCreateFeaturesResponse", "BatchDedicatedResources", "BatchMigrateResourcesOperationMetadata", "BatchMigrateResourcesRequest", "BatchMigrateResourcesResponse", "BatchPredictionJob", + "BatchReadFeatureValuesOperationMetadata", + "BatchReadFeatureValuesRequest", + "BatchReadFeatureValuesResponse", "BigQueryDestination", "BigQuerySource", + "BoolArray", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", "CancelDataLabelingJobRequest", "CancelHyperparameterTuningJobRequest", + "CancelPipelineJobRequest", "CancelTrainingPipelineRequest", "CheckTrialEarlyStoppingStateMetatdata", "CheckTrialEarlyStoppingStateRequest", @@ -247,19 +484,45 @@ "CompletionStats", "ContainerRegistryDestination", "ContainerSpec", + "Context", + "CreateArtifactRequest", "CreateBatchPredictionJobRequest", + "CreateContextRequest", "CreateCustomJobRequest", "CreateDataLabelingJobRequest", "CreateDatasetOperationMetadata", "CreateDatasetRequest", "CreateEndpointOperationMetadata", "CreateEndpointRequest", + "CreateEntityTypeOperationMetadata", + "CreateEntityTypeRequest", + "CreateExecutionRequest", + "CreateFeatureOperationMetadata", + "CreateFeatureRequest", + "CreateFeaturestoreOperationMetadata", + "CreateFeaturestoreRequest", "CreateHyperparameterTuningJobRequest", + "CreateIndexEndpointOperationMetadata", + "CreateIndexEndpointRequest", + "CreateIndexOperationMetadata", + "CreateIndexRequest", + "CreateMetadataSchemaRequest", + "CreateMetadataStoreOperationMetadata", + "CreateMetadataStoreRequest", + "CreateModelDeploymentMonitoringJobRequest", + "CreatePipelineJobRequest", "CreateSpecialistPoolOperationMetadata", "CreateSpecialistPoolRequest", "CreateStudyRequest", + 
"CreateTensorboardExperimentRequest", + "CreateTensorboardOperationMetadata", + "CreateTensorboardRequest", + "CreateTensorboardRunRequest", + "CreateTensorboardTimeSeriesRequest", "CreateTrainingPipelineRequest", "CreateTrialRequest", + "CsvDestination", + "CsvSource", "CustomJob", "CustomJobSpec", "DataItem", @@ -268,27 +531,52 @@ "DatasetServiceClient", "DedicatedResources", "DeleteBatchPredictionJobRequest", + "DeleteContextRequest", "DeleteCustomJobRequest", "DeleteDataLabelingJobRequest", "DeleteDatasetRequest", "DeleteEndpointRequest", + "DeleteEntityTypeRequest", + "DeleteFeatureRequest", + "DeleteFeaturestoreRequest", "DeleteHyperparameterTuningJobRequest", + "DeleteIndexEndpointRequest", + "DeleteIndexRequest", + "DeleteMetadataStoreOperationMetadata", + "DeleteMetadataStoreRequest", + "DeleteModelDeploymentMonitoringJobRequest", "DeleteModelRequest", "DeleteOperationMetadata", + "DeletePipelineJobRequest", "DeleteSpecialistPoolRequest", "DeleteStudyRequest", + "DeleteTensorboardExperimentRequest", + "DeleteTensorboardRequest", + "DeleteTensorboardRunRequest", + "DeleteTensorboardTimeSeriesRequest", "DeleteTrainingPipelineRequest", "DeleteTrialRequest", + "DeployIndexOperationMetadata", + "DeployIndexRequest", + "DeployIndexResponse", "DeployModelOperationMetadata", "DeployModelRequest", "DeployModelResponse", + "DeployedIndex", + "DeployedIndexAuthConfig", + "DeployedIndexRef", "DeployedModel", "DeployedModelRef", + "DestinationFeatureSetting", "DiskSpec", + "DoubleArray", "EncryptionSpec", "Endpoint", "EndpointServiceClient", + "EntityType", "EnvVar", + "Event", + "Execution", "ExplainRequest", "ExplainResponse", "Explanation", @@ -301,42 +589,88 @@ "ExportDataOperationMetadata", "ExportDataRequest", "ExportDataResponse", + "ExportFeatureValuesOperationMetadata", + "ExportFeatureValuesRequest", + "ExportFeatureValuesResponse", "ExportModelOperationMetadata", "ExportModelRequest", "ExportModelResponse", + "ExportTensorboardTimeSeriesDataRequest", + 
"ExportTensorboardTimeSeriesDataResponse", + "Feature", "FeatureNoiseSigma", + "FeatureSelector", + "FeatureStatsAnomaly", + "FeatureValue", + "FeatureValueDestination", + "FeatureValueList", + "Featurestore", + "FeaturestoreMonitoringConfig", + "FeaturestoreOnlineServingServiceClient", + "FeaturestoreServiceClient", "FilterSplit", "FractionSplit", "GcsDestination", "GcsSource", "GenericOperationMetadata", "GetAnnotationSpecRequest", + "GetArtifactRequest", "GetBatchPredictionJobRequest", + "GetContextRequest", "GetCustomJobRequest", "GetDataLabelingJobRequest", "GetDatasetRequest", "GetEndpointRequest", + "GetEntityTypeRequest", + "GetExecutionRequest", + "GetFeatureRequest", + "GetFeaturestoreRequest", "GetHyperparameterTuningJobRequest", + "GetIndexEndpointRequest", + "GetIndexRequest", + "GetMetadataSchemaRequest", + "GetMetadataStoreRequest", + "GetModelDeploymentMonitoringJobRequest", "GetModelEvaluationRequest", "GetModelEvaluationSliceRequest", "GetModelRequest", + "GetPipelineJobRequest", "GetSpecialistPoolRequest", "GetStudyRequest", + "GetTensorboardExperimentRequest", + "GetTensorboardRequest", + "GetTensorboardRunRequest", + "GetTensorboardTimeSeriesRequest", "GetTrainingPipelineRequest", "GetTrialRequest", "HyperparameterTuningJob", + "IdMatcher", "ImportDataConfig", "ImportDataOperationMetadata", "ImportDataRequest", "ImportDataResponse", + "ImportFeatureValuesOperationMetadata", + "ImportFeatureValuesRequest", + "ImportFeatureValuesResponse", + "Index", + "IndexEndpoint", + "IndexEndpointServiceClient", + "IndexPrivateEndpoints", + "IndexServiceClient", "InputDataConfig", + "Int64Array", "IntegratedGradientsAttribution", "JobServiceClient", "JobState", + "LineageSubgraph", "ListAnnotationsRequest", "ListAnnotationsResponse", + "ListArtifactsRequest", + "ListArtifactsResponse", "ListBatchPredictionJobsRequest", "ListBatchPredictionJobsResponse", + "ListContextsRequest", + "ListContextsResponse", "ListCustomJobsRequest", "ListCustomJobsResponse", 
"ListDataItemsRequest", @@ -347,8 +681,26 @@ "ListDatasetsResponse", "ListEndpointsRequest", "ListEndpointsResponse", + "ListEntityTypesRequest", + "ListEntityTypesResponse", + "ListExecutionsRequest", + "ListExecutionsResponse", + "ListFeaturesRequest", + "ListFeaturesResponse", + "ListFeaturestoresRequest", + "ListFeaturestoresResponse", "ListHyperparameterTuningJobsRequest", "ListHyperparameterTuningJobsResponse", + "ListIndexEndpointsRequest", + "ListIndexEndpointsResponse", + "ListIndexesRequest", + "ListIndexesResponse", + "ListMetadataSchemasRequest", + "ListMetadataSchemasResponse", + "ListMetadataStoresRequest", + "ListMetadataStoresResponse", + "ListModelDeploymentMonitoringJobsRequest", + "ListModelDeploymentMonitoringJobsResponse", "ListModelEvaluationSlicesRequest", "ListModelEvaluationSlicesResponse", "ListModelEvaluationsRequest", @@ -357,10 +709,20 @@ "ListModelsResponse", "ListOptimalTrialsRequest", "ListOptimalTrialsResponse", + "ListPipelineJobsRequest", + "ListPipelineJobsResponse", "ListSpecialistPoolsRequest", "ListSpecialistPoolsResponse", "ListStudiesRequest", "ListStudiesResponse", + "ListTensorboardExperimentsRequest", + "ListTensorboardExperimentsResponse", + "ListTensorboardRunsRequest", + "ListTensorboardRunsResponse", + "ListTensorboardTimeSeriesRequest", + "ListTensorboardTimeSeriesResponse", + "ListTensorboardsRequest", + "ListTensorboardsResponse", "ListTrainingPipelinesRequest", "ListTrainingPipelinesResponse", "ListTrialsRequest", @@ -369,18 +731,35 @@ "MachineSpec", "ManualBatchTuningParameters", "Measurement", + "MetadataSchema", + "MetadataServiceClient", + "MetadataStore", "MigratableResource", "MigrateResourceRequest", "MigrateResourceResponse", "MigrationServiceClient", "Model", "ModelContainerSpec", + "ModelDeploymentMonitoringBigQueryTable", + "ModelDeploymentMonitoringJob", + "ModelDeploymentMonitoringObjectiveConfig", + "ModelDeploymentMonitoringObjectiveType", + "ModelDeploymentMonitoringScheduleConfig", 
"ModelEvaluation", "ModelEvaluationSlice", "ModelExplanation", + "ModelMonitoringAlertConfig", + "ModelMonitoringObjectiveConfig", + "ModelMonitoringStatsAnomalies", "ModelServiceClient", + "NearestNeighborSearchOperationMetadata", + "PauseModelDeploymentMonitoringJobRequest", + "PipelineJob", + "PipelineJobDetail", "PipelineServiceClient", "PipelineState", + "PipelineTaskDetail", + "PipelineTaskExecutorDetail", "Port", "PredefinedSplit", "PredictRequest", @@ -388,38 +767,91 @@ "PredictSchemata", "PredictionServiceClient", "PythonPackageSpec", + "QueryArtifactLineageSubgraphRequest", + "QueryContextLineageSubgraphRequest", + "QueryExecutionInputsAndOutputsRequest", + "ReadFeatureValuesRequest", + "ReadFeatureValuesResponse", + "ReadTensorboardBlobDataRequest", + "ReadTensorboardBlobDataResponse", + "ReadTensorboardTimeSeriesDataRequest", + "ReadTensorboardTimeSeriesDataResponse", "ResourcesConsumed", + "ResumeModelDeploymentMonitoringJobRequest", "SampleConfig", "SampledShapleyAttribution", + "SamplingStrategy", + "Scalar", "Scheduling", + "SearchFeaturesRequest", + "SearchFeaturesResponse", "SearchMigratableResourcesRequest", "SearchMigratableResourcesResponse", + "SearchModelDeploymentMonitoringStatsAnomaliesRequest", + "SearchModelDeploymentMonitoringStatsAnomaliesResponse", "SmoothGradConfig", "SpecialistPool", "SpecialistPoolServiceClient", "StopTrialRequest", + "StreamingReadFeatureValuesRequest", + "StringArray", "Study", "StudySpec", "SuggestTrialsMetadata", "SuggestTrialsRequest", "SuggestTrialsResponse", + "TFRecordDestination", + "Tensorboard", + "TensorboardBlob", + "TensorboardBlobSequence", + "TensorboardExperiment", + "TensorboardRun", + "TensorboardServiceClient", + "TensorboardTensor", + "TensorboardTimeSeries", + "ThresholdConfig", + "TimeSeriesData", + "TimeSeriesDataPoint", "TimestampSplit", "TrainingConfig", "TrainingPipeline", "Trial", + "UndeployIndexOperationMetadata", + "UndeployIndexRequest", + "UndeployIndexResponse", 
"UndeployModelOperationMetadata", "UndeployModelRequest", "UndeployModelResponse", + "UpdateArtifactRequest", + "UpdateContextRequest", "UpdateDatasetRequest", "UpdateEndpointRequest", + "UpdateEntityTypeRequest", + "UpdateExecutionRequest", + "UpdateFeatureRequest", + "UpdateFeaturestoreOperationMetadata", + "UpdateFeaturestoreRequest", + "UpdateIndexEndpointRequest", + "UpdateIndexOperationMetadata", + "UpdateIndexRequest", + "UpdateModelDeploymentMonitoringJobOperationMetadata", + "UpdateModelDeploymentMonitoringJobRequest", "UpdateModelRequest", "UpdateSpecialistPoolOperationMetadata", "UpdateSpecialistPoolRequest", + "UpdateTensorboardExperimentRequest", + "UpdateTensorboardOperationMetadata", + "UpdateTensorboardRequest", + "UpdateTensorboardRunRequest", + "UpdateTensorboardTimeSeriesRequest", "UploadModelOperationMetadata", "UploadModelRequest", "UploadModelResponse", "UserActionReference", + "Value", "WorkerPoolSpec", + "WriteTensorboardRunDataRequest", + "WriteTensorboardRunDataResponse", "XraiAttribution", "VizierServiceClient", ) diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index d91df4b644..5b3e917e98 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers from google.cloud.aiplatform_v1beta1.types import annotation @@ -205,7 +205,7 @@ async def create_dataset( Args: request 
(:class:`google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest`): The request object. Request message for - ``DatasetService.CreateDataset``. + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. parent (:class:`str`): Required. The resource name of the Location to create the Dataset in. Format: @@ -297,7 +297,7 @@ async def get_dataset( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetDatasetRequest`): The request object. Request message for - ``DatasetService.GetDataset``. + [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. name (:class:`str`): Required. The name of the Dataset resource. @@ -371,7 +371,7 @@ async def update_dataset( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest`): The request object. Request message for - ``DatasetService.UpdateDataset``. + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. dataset (:class:`google.cloud.aiplatform_v1beta1.types.Dataset`): Required. The Dataset which replaces the resource on the server. @@ -461,7 +461,7 @@ async def list_datasets( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest`): The request object. Request message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. parent (:class:`str`): Required. The name of the Dataset's parent resource. Format: ``projects/{project}/locations/{location}`` @@ -479,7 +479,7 @@ async def list_datasets( Returns: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsAsyncPager: Response message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. Iterating over this object will yield results and resolve additional pages automatically. 
@@ -543,7 +543,7 @@ async def delete_dataset( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest`): The request object. Request message for - ``DatasetService.DeleteDataset``. + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. name (:class:`str`): Required. The resource name of the Dataset to delete. Format: @@ -639,7 +639,7 @@ async def import_data( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ImportDataRequest`): The request object. Request message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. name (:class:`str`): Required. The name of the Dataset resource. Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` @@ -669,7 +669,7 @@ async def import_data( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ImportDataResponse` Response message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. """ # Create or coerce a protobuf request object. @@ -736,7 +736,7 @@ async def export_data( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ExportDataRequest`): The request object. Request message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. name (:class:`str`): Required. The name of the Dataset resource. Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` @@ -765,7 +765,7 @@ async def export_data( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ExportDataResponse` Response message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. """ # Create or coerce a protobuf request object. 
@@ -830,7 +830,7 @@ async def list_data_items( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest`): The request object. Request message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. parent (:class:`str`): Required. The resource name of the Dataset to list DataItems from. Format: @@ -849,7 +849,7 @@ async def list_data_items( Returns: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsAsyncPager: Response message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. Iterating over this object will yield results and resolve additional pages automatically. @@ -913,7 +913,7 @@ async def get_annotation_spec( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest`): The request object. Request message for - ``DatasetService.GetAnnotationSpec``. + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. name (:class:`str`): Required. The name of the AnnotationSpec resource. Format: @@ -987,7 +987,7 @@ async def list_annotations( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest`): The request object. Request message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. parent (:class:`str`): Required. The resource name of the DataItem to list Annotations from. Format: @@ -1006,7 +1006,7 @@ async def list_annotations( Returns: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsAsyncPager: Response message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. 
Iterating over this object will yield results and resolve additional pages automatically. diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py index 37aecfc5e5..4243557717 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers from google.cloud.aiplatform_v1beta1.types import annotation @@ -426,13 +426,13 @@ def create_dataset( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Creates a Dataset. Args: request (google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest): The request object. Request message for - ``DatasetService.CreateDataset``. + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. parent (str): Required. The resource name of the Location to create the Dataset in. Format: @@ -501,7 +501,7 @@ def create_dataset( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, gca_dataset.Dataset, @@ -525,7 +525,7 @@ def get_dataset( Args: request (google.cloud.aiplatform_v1beta1.types.GetDatasetRequest): The request object. Request message for - ``DatasetService.GetDataset``. 
+ [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. name (str): Required. The name of the Dataset resource. @@ -600,7 +600,7 @@ def update_dataset( Args: request (google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest): The request object. Request message for - ``DatasetService.UpdateDataset``. + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. dataset (google.cloud.aiplatform_v1beta1.types.Dataset): Required. The Dataset which replaces the resource on the server. @@ -691,7 +691,7 @@ def list_datasets( Args: request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): The request object. Request message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. parent (str): Required. The name of the Dataset's parent resource. Format: ``projects/{project}/locations/{location}`` @@ -709,7 +709,7 @@ def list_datasets( Returns: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsPager: Response message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. Iterating over this object will yield results and resolve additional pages automatically. @@ -768,13 +768,13 @@ def delete_dataset( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a Dataset. Args: request (google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest): The request object. Request message for - ``DatasetService.DeleteDataset``. + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. name (str): Required. The resource name of the Dataset to delete. 
Format: @@ -846,7 +846,7 @@ def delete_dataset( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -865,13 +865,13 @@ def import_data( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Imports data into a Dataset. Args: request (google.cloud.aiplatform_v1beta1.types.ImportDataRequest): The request object. Request message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. name (str): Required. The name of the Dataset resource. Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` @@ -901,7 +901,7 @@ def import_data( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ImportDataResponse` Response message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. """ # Create or coerce a protobuf request object. @@ -943,7 +943,7 @@ def import_data( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, dataset_service.ImportDataResponse, @@ -962,13 +962,13 @@ def export_data( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Exports data from a Dataset. Args: request (google.cloud.aiplatform_v1beta1.types.ExportDataRequest): The request object. Request message for - ``DatasetService.ExportData``. 
+ [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. name (str): Required. The name of the Dataset resource. Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` @@ -997,7 +997,7 @@ def export_data( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ExportDataResponse` Response message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. """ # Create or coerce a protobuf request object. @@ -1039,7 +1039,7 @@ def export_data( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, dataset_service.ExportDataResponse, @@ -1063,7 +1063,7 @@ def list_data_items( Args: request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest): The request object. Request message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. parent (str): Required. The resource name of the Dataset to list DataItems from. Format: @@ -1082,7 +1082,7 @@ def list_data_items( Returns: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsPager: Response message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. Iterating over this object will yield results and resolve additional pages automatically. @@ -1147,7 +1147,7 @@ def get_annotation_spec( Args: request (google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest): The request object. Request message for - ``DatasetService.GetAnnotationSpec``. + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. name (str): Required. 
The name of the AnnotationSpec resource. Format: @@ -1222,7 +1222,7 @@ def list_annotations( Args: request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest): The request object. Request message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. parent (str): Required. The resource name of the DataItem to list Annotations from. Format: @@ -1241,7 +1241,7 @@ def list_annotations( Returns: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsPager: Response message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. Iterating over this object will yield results and resolve additional pages automatically. diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py index 56f567959a..75dc66a554 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py @@ -74,10 +74,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
@@ -85,6 +85,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -94,20 +97,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py index 4dae75d109..ca597a1e69 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py @@ -109,7 +109,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -117,70 +120,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. 
credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -188,18 +171,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -213,7 +186,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -248,7 +221,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py index 0c38b2ec38..f51fe3bf1b 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py @@ -64,7 +64,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -142,10 +142,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -154,7 +154,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -162,70 +165,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -233,18 +216,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index 05aa538225..1ca925e2d7 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec @@ -195,7 +195,7 @@ async def create_endpoint( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest`): The request object. Request message for - ``EndpointService.CreateEndpoint``. 
+ [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. parent (:class:`str`): Required. The resource name of the Location to create the Endpoint in. Format: @@ -286,7 +286,7 @@ async def get_endpoint( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetEndpointRequest`): The request object. Request message for - ``EndpointService.GetEndpoint`` + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] name (:class:`str`): Required. The name of the Endpoint resource. Format: ``projects/{project}/locations/{location}/endpoints/{endpoint}`` @@ -360,7 +360,7 @@ async def list_endpoints( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest`): The request object. Request message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. parent (:class:`str`): Required. The resource name of the Location from which to list the Endpoints. Format: @@ -379,7 +379,7 @@ async def list_endpoints( Returns: google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsAsyncPager: Response message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. Iterating over this object will yield results and resolve additional pages automatically. @@ -444,7 +444,7 @@ async def update_endpoint( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest`): The request object. Request message for - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`): Required. The Endpoint which replaces the resource on the server. 
@@ -529,7 +529,7 @@ async def delete_endpoint( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest`): The request object. Request message for - ``EndpointService.DeleteEndpoint``. + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. name (:class:`str`): Required. The name of the Endpoint resource to be deleted. Format: @@ -629,7 +629,7 @@ async def deploy_model( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeployModelRequest`): The request object. Request message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. endpoint (:class:`str`): Required. The name of the Endpoint resource into which to deploy a Model. Format: @@ -641,10 +641,10 @@ async def deploy_model( deployed_model (:class:`google.cloud.aiplatform_v1beta1.types.DeployedModel`): Required. The DeployedModel to be created within the Endpoint. Note that - ``Endpoint.traffic_split`` + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this @@ -655,7 +655,7 @@ async def deploy_model( DeployedModel. If this field is non-empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its @@ -663,7 +663,7 @@ async def deploy_model( add up to 100. 
If this field is empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] is not updated. This corresponds to the ``traffic_split`` field @@ -683,7 +683,7 @@ async def deploy_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse` Response message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. """ # Create or coerce a protobuf request object. @@ -757,7 +757,7 @@ async def undeploy_model( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.UndeployModelRequest`): The request object. Request message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. endpoint (:class:`str`): Required. The name of the Endpoint resource from which to undeploy a Model. Format: @@ -775,7 +775,7 @@ async def undeploy_model( should not be set. traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]`): If this field is provided, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when @@ -801,7 +801,7 @@ async def undeploy_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse` Response message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. """ # Create or coerce a protobuf request object. 
diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py index 1fdf1e506e..fa5add8a52 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec @@ -377,13 +377,13 @@ def create_endpoint( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Creates an Endpoint. Args: request (google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest): The request object. Request message for - ``EndpointService.CreateEndpoint``. + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. parent (str): Required. The resource name of the Location to create the Endpoint in. Format: @@ -451,7 +451,7 @@ def create_endpoint( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, gca_endpoint.Endpoint, @@ -475,7 +475,7 @@ def get_endpoint( Args: request (google.cloud.aiplatform_v1beta1.types.GetEndpointRequest): The request object. 
Request message for - ``EndpointService.GetEndpoint`` + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] name (str): Required. The name of the Endpoint resource. Format: ``projects/{project}/locations/{location}/endpoints/{endpoint}`` @@ -550,7 +550,7 @@ def list_endpoints( Args: request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): The request object. Request message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. parent (str): Required. The resource name of the Location from which to list the Endpoints. Format: @@ -569,7 +569,7 @@ def list_endpoints( Returns: google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsPager: Response message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. Iterating over this object will yield results and resolve additional pages automatically. @@ -635,7 +635,7 @@ def update_endpoint( Args: request (google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest): The request object. Request message for - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): Required. The Endpoint which replaces the resource on the server. @@ -715,13 +715,13 @@ def delete_endpoint( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes an Endpoint. Args: request (google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest): The request object. Request message for - ``EndpointService.DeleteEndpoint``. + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. name (str): Required. 
The name of the Endpoint resource to be deleted. Format: @@ -793,7 +793,7 @@ def delete_endpoint( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -815,14 +815,14 @@ def deploy_model( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deploys a Model into this Endpoint, creating a DeployedModel within it. Args: request (google.cloud.aiplatform_v1beta1.types.DeployModelRequest): The request object. Request message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. endpoint (str): Required. The name of the Endpoint resource into which to deploy a Model. Format: @@ -834,10 +834,10 @@ def deploy_model( deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): Required. The DeployedModel to be created within the Endpoint. Note that - ``Endpoint.traffic_split`` + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this @@ -848,7 +848,7 @@ def deploy_model( DeployedModel. If this field is non-empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it. 
To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its @@ -856,7 +856,7 @@ def deploy_model( add up to 100. If this field is empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] is not updated. This corresponds to the ``traffic_split`` field @@ -876,7 +876,7 @@ def deploy_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse` Response message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. """ # Create or coerce a protobuf request object. @@ -920,7 +920,7 @@ def deploy_model( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, endpoint_service.DeployModelResponse, @@ -942,7 +942,7 @@ def undeploy_model( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Undeploys a Model from an Endpoint, removing a DeployedModel from it, and freeing all resources it's using. @@ -950,7 +950,7 @@ def undeploy_model( Args: request (google.cloud.aiplatform_v1beta1.types.UndeployModelRequest): The request object. Request message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. endpoint (str): Required. The name of the Endpoint resource from which to undeploy a Model. Format: @@ -968,7 +968,7 @@ def undeploy_model( should not be set. 
traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]): If this field is provided, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when @@ -994,7 +994,7 @@ def undeploy_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse` Response message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. """ # Create or coerce a protobuf request object. @@ -1038,7 +1038,7 @@ def undeploy_model( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, endpoint_service.UndeployModelResponse, diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py index e55589de8f..9ff0668d04 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py @@ -73,10 +73,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -84,6 +84,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -93,20 +96,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py index 455ed12cf4..8943c2f3f0 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py @@ -108,7 +108,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -116,70 +119,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -187,18 +170,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -212,7 +185,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -247,7 +220,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py index a00971a72e..141168146d 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py @@ -63,7 +63,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -141,10 +141,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -153,7 +153,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -161,70 +164,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -232,18 +215,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py new file mode 100644 index 0000000000..8fca4944ab --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .client import FeaturestoreOnlineServingServiceClient +from .async_client import FeaturestoreOnlineServingServiceAsyncClient + +__all__ = ( + "FeaturestoreOnlineServingServiceClient", + "FeaturestoreOnlineServingServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py new file mode 100644 index 0000000000..cb29e164f7 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py @@ -0,0 +1,365 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service + +from .transports.base import ( + FeaturestoreOnlineServingServiceTransport, + DEFAULT_CLIENT_INFO, +) +from .transports.grpc_asyncio import ( + FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, +) +from .client import FeaturestoreOnlineServingServiceClient + + +class FeaturestoreOnlineServingServiceAsyncClient: + """A service for serving online feature values.""" + + _client: FeaturestoreOnlineServingServiceClient + + DEFAULT_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_MTLS_ENDPOINT + + entity_type_path = staticmethod( + FeaturestoreOnlineServingServiceClient.entity_type_path + ) + parse_entity_type_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_entity_type_path + ) + + common_billing_account_path = staticmethod( + FeaturestoreOnlineServingServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod( + FeaturestoreOnlineServingServiceClient.common_folder_path + ) + parse_common_folder_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + FeaturestoreOnlineServingServiceClient.common_organization_path + ) + 
parse_common_organization_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_common_organization_path + ) + + common_project_path = staticmethod( + FeaturestoreOnlineServingServiceClient.common_project_path + ) + parse_common_project_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_common_project_path + ) + + common_location_path = staticmethod( + FeaturestoreOnlineServingServiceClient.common_location_path + ) + parse_common_location_path = staticmethod( + FeaturestoreOnlineServingServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceAsyncClient: The constructed client. + """ + return FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__(FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceAsyncClient: The constructed client. + """ + return FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__(FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreOnlineServingServiceTransport: + """Return the transport used by the client instance. 
+ + Returns: + FeaturestoreOnlineServingServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(FeaturestoreOnlineServingServiceClient).get_transport_class, + type(FeaturestoreOnlineServingServiceClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[ + str, FeaturestoreOnlineServingServiceTransport + ] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the featurestore online serving service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.FeaturestoreOnlineServingServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = FeaturestoreOnlineServingServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def read_feature_values( + self, + request: featurestore_online_service.ReadFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore_online_service.ReadFeatureValuesResponse: + r"""Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType for the + entity being read. Value format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + "user". + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_online_service.ReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_feature_values, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def streaming_read_feature_values( + self, + request: featurestore_online_service.StreamingReadFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[ + AsyncIterable[featurestore_online_service.ReadFeatureValuesResponse] + ]: + r"""Reads Feature values for multiple entities. 
Depending + on their size, data for different entities may be broken + up across multiple responses. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. + entity_type (:class:`str`): + Required. The resource name of the entities' type. Value + format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + "user". + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_online_service.StreamingReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.streaming_read_feature_values, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("FeaturestoreOnlineServingServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py new file mode 100644 index 0000000000..63acf92e7e --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py @@ -0,0 +1,545 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Iterable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service + +from .transports.base import ( + FeaturestoreOnlineServingServiceTransport, + DEFAULT_CLIENT_INFO, +) +from .transports.grpc import FeaturestoreOnlineServingServiceGrpcTransport +from .transports.grpc_asyncio import ( + FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, +) + + +class FeaturestoreOnlineServingServiceClientMeta(type): + """Metaclass for the FeaturestoreOnlineServingService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] + _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport + _transport_registry[ + "grpc_asyncio" + ] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[FeaturestoreOnlineServingServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. 
+ + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class FeaturestoreOnlineServingServiceClient( + metaclass=FeaturestoreOnlineServingServiceClientMeta +): + """A service for serving online feature values.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreOnlineServingServiceTransport: + """Return the transport used by the client instance. + + Returns: + FeaturestoreOnlineServingServiceTransport: The transport used by the client instance. 
+ """ + return self._transport + + @staticmethod + def entity_type_path( + project: str, location: str, featurestore: str, entity_type: str, + ) -> str: + """Return a fully-qualified entity_type string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + ) + + @staticmethod + def parse_entity_type_path(path: str) -> Dict[str, str]: + """Parse a entity_type path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} 
+ + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, FeaturestoreOnlineServingServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the featurestore online serving service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, FeaturestoreOnlineServingServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, FeaturestoreOnlineServingServiceTransport): + # transport is a FeaturestoreOnlineServingServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def read_feature_values( + self, + request: featurestore_online_service.ReadFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore_online_service.ReadFeatureValuesResponse: + r"""Reads Feature values of a specific entity of an + EntityType. 
For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest): + The request object. Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType for the + entity being read. Value format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + "user". + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_online_service.ReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance( + request, featurestore_online_service.ReadFeatureValuesRequest + ): + request = featurestore_online_service.ReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def streaming_read_feature_values( + self, + request: featurestore_online_service.StreamingReadFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[featurestore_online_service.ReadFeatureValuesResponse]: + r"""Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + Args: + request (google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest): + The request object. Request message for + [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. + entity_type (str): + Required. The resource name of the entities' type. Value + format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + "user". 
+ + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_online_service.StreamingReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, featurestore_online_service.StreamingReadFeatureValuesRequest + ): + request = featurestore_online_service.StreamingReadFeatureValuesRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.streaming_read_feature_values + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("FeaturestoreOnlineServingServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py new file mode 100644 index 0000000000..fbb212cbc6 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import FeaturestoreOnlineServingServiceTransport +from .grpc import FeaturestoreOnlineServingServiceGrpcTransport +from .grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] +_transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport +_transport_registry[ + "grpc_asyncio" +] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + +__all__ = ( + "FeaturestoreOnlineServingServiceTransport", + "FeaturestoreOnlineServingServiceGrpcTransport", + "FeaturestoreOnlineServingServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py new file mode 100644 index 0000000000..7cdcd29858 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class FeaturestoreOnlineServingServiceTransport(abc.ABC): + """Abstract transport class for FeaturestoreOnlineServingService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=self._scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.read_feature_values: gapic_v1.method.wrap_method( + self.read_feature_values, default_timeout=5.0, client_info=client_info, + ), + self.streaming_read_feature_values: gapic_v1.method.wrap_method( + self.streaming_read_feature_values, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def read_feature_values( + self, + ) -> typing.Callable[ + [featurestore_online_service.ReadFeatureValuesRequest], + typing.Union[ + featurestore_online_service.ReadFeatureValuesResponse, + typing.Awaitable[featurestore_online_service.ReadFeatureValuesResponse], + ], + ]: + raise NotImplementedError() + + @property + def streaming_read_feature_values( + self, + ) -> typing.Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + typing.Union[ + featurestore_online_service.ReadFeatureValuesResponse, + typing.Awaitable[featurestore_online_service.ReadFeatureValuesResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("FeaturestoreOnlineServingServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py new file mode 100644 index 0000000000..97b31e4acc --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service + +from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO + + +class FeaturestoreOnlineServingServiceGrpcTransport( + FeaturestoreOnlineServingServiceTransport +): + """gRPC backend transport for FeaturestoreOnlineServingService. + + A service for serving online feature values. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. 
+ """ + return self._grpc_channel + + @property + def read_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.ReadFeatureValuesRequest], + featurestore_online_service.ReadFeatureValuesResponse, + ]: + r"""Return a callable for the read feature values method over gRPC. + + Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Returns: + Callable[[~.ReadFeatureValuesRequest], + ~.ReadFeatureValuesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_feature_values" not in self._stubs: + self._stubs["read_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues", + request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs["read_feature_values"] + + @property + def streaming_read_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + featurestore_online_service.ReadFeatureValuesResponse, + ]: + r"""Return a callable for the streaming read feature values method over gRPC. + + Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + Returns: + Callable[[~.StreamingReadFeatureValuesRequest], + ~.ReadFeatureValuesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "streaming_read_feature_values" not in self._stubs: + self._stubs[ + "streaming_read_feature_values" + ] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues", + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs["streaming_read_feature_values"] + + +__all__ = ("FeaturestoreOnlineServingServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..5f92a32ab6 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py @@ -0,0 +1,296 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service + +from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import FeaturestoreOnlineServingServiceGrpcTransport + + +class FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + FeaturestoreOnlineServingServiceTransport +): + """gRPC AsyncIO backend transport for FeaturestoreOnlineServingService. + + A service for serving online feature values. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def read_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.ReadFeatureValuesRequest], + Awaitable[featurestore_online_service.ReadFeatureValuesResponse], + ]: + r"""Return a callable for the read feature values method over gRPC. + + Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Returns: + Callable[[~.ReadFeatureValuesRequest], + Awaitable[~.ReadFeatureValuesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_feature_values" not in self._stubs: + self._stubs["read_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues", + request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs["read_feature_values"] + + @property + def streaming_read_feature_values( + self, + ) -> Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + Awaitable[featurestore_online_service.ReadFeatureValuesResponse], + ]: + r"""Return a callable for the streaming read feature values method over gRPC. + + Reads Feature values for multiple entities. 
Depending + on their size, data for different entities may be broken + up across multiple responses. + + Returns: + Callable[[~.StreamingReadFeatureValuesRequest], + Awaitable[~.ReadFeatureValuesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "streaming_read_feature_values" not in self._stubs: + self._stubs[ + "streaming_read_feature_values" + ] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues", + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs["streaming_read_feature_values"] + + +__all__ = ("FeaturestoreOnlineServingServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py new file mode 100644 index 0000000000..86c61ed8cf --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .client import FeaturestoreServiceClient +from .async_client import FeaturestoreServiceAsyncClient + +__all__ = ( + "FeaturestoreServiceClient", + "FeaturestoreServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py new file mode 100644 index 0000000000..e9425b2be1 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -0,0 +1,2048 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport +from .client import FeaturestoreServiceClient + + +class FeaturestoreServiceAsyncClient: + """The service that handles CRUD and List for resources for + Featurestore. 
+ """ + + _client: FeaturestoreServiceClient + + DEFAULT_ENDPOINT = FeaturestoreServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = FeaturestoreServiceClient.DEFAULT_MTLS_ENDPOINT + + entity_type_path = staticmethod(FeaturestoreServiceClient.entity_type_path) + parse_entity_type_path = staticmethod( + FeaturestoreServiceClient.parse_entity_type_path + ) + feature_path = staticmethod(FeaturestoreServiceClient.feature_path) + parse_feature_path = staticmethod(FeaturestoreServiceClient.parse_feature_path) + featurestore_path = staticmethod(FeaturestoreServiceClient.featurestore_path) + parse_featurestore_path = staticmethod( + FeaturestoreServiceClient.parse_featurestore_path + ) + + common_billing_account_path = staticmethod( + FeaturestoreServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + FeaturestoreServiceClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(FeaturestoreServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + FeaturestoreServiceClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + FeaturestoreServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + FeaturestoreServiceClient.parse_common_organization_path + ) + + common_project_path = staticmethod(FeaturestoreServiceClient.common_project_path) + parse_common_project_path = staticmethod( + FeaturestoreServiceClient.parse_common_project_path + ) + + common_location_path = staticmethod(FeaturestoreServiceClient.common_location_path) + parse_common_location_path = staticmethod( + FeaturestoreServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceAsyncClient: The constructed client. + """ + return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceAsyncClient: The constructed client. + """ + return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreServiceTransport: + """Return the transport used by the client instance. + + Returns: + FeaturestoreServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(FeaturestoreServiceClient).get_transport_class, + type(FeaturestoreServiceClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, FeaturestoreServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the featurestore service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, ~.FeaturestoreServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = FeaturestoreServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_featurestore( + self, + request: featurestore_service.CreateFeaturestoreRequest = None, + *, + parent: str = None, + featurestore: gca_featurestore.Featurestore = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Featurestore in a given project and + location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest`): + The request object. 
+ Request message for + [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. + parent (:class:`str`): + Required. The resource name of the Location to create + Featurestores. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore (:class:`google.cloud.aiplatform_v1beta1.types.Featurestore`): + Required. The Featurestore to create. + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` + Featurestore configuration information on how the + Featurestore is configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, featurestore]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.CreateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if parent is not None: + request.parent = parent + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_featurestore( + self, + request: featurestore_service.GetFeaturestoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore.Featurestore: + r"""Gets details of a single Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest`): + The request object. Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. + name (:class:`str`): + Required. The name of the + Featurestore resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Featurestore: + Featurestore configuration + information on how the Featurestore is + configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.GetFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_featurestores( + self, + request: featurestore_service.ListFeaturestoresRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturestoresAsyncPager: + r"""Lists Featurestores in a given project and location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest`): + The request object. 
Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + parent (:class:`str`): + Required. The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager: + Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.ListFeaturestoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_featurestores, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListFeaturestoresAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_featurestore( + self, + request: featurestore_service.UpdateFeaturestoreRequest = None, + *, + featurestore: gca_featurestore.Featurestore = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates the parameters of a single Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest`): + The request object. Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. + featurestore (:class:`google.cloud.aiplatform_v1beta1.types.Featurestore`): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the Featurestore resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. 
If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``display_name`` + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.max_online_serving_size`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` + Featurestore configuration information on how the + Featurestore is configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.UpdateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if featurestore is not None: + request.featurestore = featurestore + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("featurestore.name", request.featurestore.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_featurestore( + self, + request: featurestore_service.DeleteFeaturestoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest`): + The request object. Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. + name (:class:`str`): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.DeleteFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_entity_type( + self, + request: featurestore_service.CreateEntityTypeRequest = None, + *, + parent: str = None, + entity_type: gca_entity_type.EntityType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new EntityType in a given Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest`): + The request object. Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. + parent (:class:`str`): + Required. The resource name of the Featurestore to + create EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type (:class:`google.cloud.aiplatform_v1beta1.types.EntityType`): + The EntityType to create. + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and + have stored information about. 
For example, driver is + an entity type, and driver0 is an instance of an + entity type driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.CreateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_entity_type.EntityType, + metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_entity_type( + self, + request: featurestore_service.GetEntityTypeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> entity_type.EntityType: + r"""Gets details of a single EntityType. 
+ + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest`): + The request object. Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. + name (:class:`str`): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.GetEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_entity_types( + self, + request: featurestore_service.ListEntityTypesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEntityTypesAsyncPager: + r"""Lists EntityTypes in a given Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest`): + The request object. Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + parent (:class:`str`): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesAsyncPager: + Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.ListEntityTypesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_entity_types, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListEntityTypesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_entity_type( + self, + request: featurestore_service.UpdateEntityTypeRequest = None, + *, + entity_type: gca_entity_type.EntityType = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_entity_type.EntityType: + r"""Updates the parameters of a single EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest`): + The request object. 
Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. + entity_type (:class:`google.cloud.aiplatform_v1beta1.types.EntityType`): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the EntityType resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.UpdateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if entity_type is not None: + request.entity_type = entity_type + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type.name", request.entity_type.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_entity_type( + self, + request: featurestore_service.DeleteEntityTypeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest`): + The request object. Request message for + [FeaturestoreService.DeleteEntityTypes][]. + name (:class:`str`): + Required. The name of the EntityType to be deleted. 
+ Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.DeleteEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_feature( + self, + request: featurestore_service.CreateFeatureRequest = None, + *, + parent: str = None, + feature: gca_feature.Feature = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Feature in a given EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest`): + The request object. Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. + parent (:class:`str`): + Required. The resource name of the EntityType to create + a Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature (:class:`google.cloud.aiplatform_v1beta1.types.Feature`): + Required. The Feature to create. + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Feature` Feature Metadata information that describes an attribute of an entity type. + For example, apple is an entity type, and color is a + feature that describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, feature]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.CreateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if feature is not None: + request.feature = feature + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_feature.Feature, + metadata_type=featurestore_service.CreateFeatureOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def batch_create_features( + self, + request: featurestore_service.BatchCreateFeaturesRequest = None, + *, + parent: str = None, + requests: Sequence[featurestore_service.CreateFeatureRequest] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a batch of Features in a given EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest`): + The request object. Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + parent (:class:`str`): + Required. The resource name of the EntityType to create + the batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]`): + Required. The request message specifying the Features to + create. All Features must be created under the same + parent EntityType. The ``parent`` field in each child + request message can be omitted. If ``parent`` is set in + a child request, then the value must match the + ``parent`` value in this request message. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesResponse` + Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.BatchCreateFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_features, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.BatchCreateFeaturesResponse, + metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def get_feature( + self, + request: featurestore_service.GetFeatureRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> feature.Feature: + r"""Gets details of a single Feature. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetFeatureRequest`): + The request object. Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. + name (:class:`str`): + Required. The name of the Feature resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.GetFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_features( + self, + request: featurestore_service.ListFeaturesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturesAsyncPager: + r"""Lists Features in a given EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest`): + The request object. Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + parent (:class:`str`): + Required. The resource name of the Location to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesAsyncPager: + Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.ListFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_features, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListFeaturesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_feature( + self, + request: featurestore_service.UpdateFeatureRequest = None, + *, + feature: gca_feature.Feature = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_feature.Feature: + r"""Updates the parameters of a single Feature. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest`): + The request object. Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. + feature (:class:`google.cloud.aiplatform_v1beta1.types.Feature`): + Required. The Feature's ``name`` field is used to + identify the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the Features resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([feature, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.UpdateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if feature is not None: + request.feature = feature + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("feature.name", request.feature.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def delete_feature( + self, + request: featurestore_service.DeleteFeatureRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Feature. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest`): + The request object. Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. + name (:class:`str`): + Required. The name of the Features to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.DeleteFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def import_feature_values( + self, + request: featurestore_service.ImportFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. 
If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType grouping + the Features for which values are being imported. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse` + Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.ImportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_feature_values, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.ImportFeatureValuesResponse, + metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def batch_read_feature_values( + self, + request: featurestore_service.BatchReadFeatureValuesRequest = None, + *, + featurestore: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Batch reads Feature values from a Featurestore. 
+ This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + (- Next Id: 6 -) + featurestore (:class:`str`): + Required. The resource name of the Featurestore from + which to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse` + Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = featurestore_service.BatchReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_read_feature_values, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("featurestore", request.featurestore),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.BatchReadFeatureValuesResponse, + metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def export_feature_values( + self, + request: featurestore_service.ExportFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports Feature values from all the entities of a + target EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType from which + to export Feature values. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` + Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.ExportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.ExportFeatureValuesResponse, + metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def search_features( + self, + request: featurestore_service.SearchFeaturesRequest = None, + *, + location: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchFeaturesAsyncPager: + r"""Searches Features matching a query in a given + project. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest`): + The request object. Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + location (:class:`str`): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``location`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesAsyncPager: + Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([location]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = featurestore_service.SearchFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if location is not None: + request.location = location + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_features, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("location", request.location),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchFeaturesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("FeaturestoreServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py new file mode 100644 index 0000000000..89406353ea --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py @@ -0,0 +1,2279 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from 
.transports.grpc import FeaturestoreServiceGrpcTransport +from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport + + +class FeaturestoreServiceClientMeta(type): + """Metaclass for the FeaturestoreService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[FeaturestoreServiceTransport]] + _transport_registry["grpc"] = FeaturestoreServiceGrpcTransport + _transport_registry["grpc_asyncio"] = FeaturestoreServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[FeaturestoreServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class FeaturestoreServiceClient(metaclass=FeaturestoreServiceClientMeta): + """The service that handles CRUD and List for resources for + Featurestore. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreServiceTransport: + """Return the transport used by the client instance. + + Returns: + FeaturestoreServiceTransport: The transport used by the client instance. 
+ """ + return self._transport + + @staticmethod + def entity_type_path( + project: str, location: str, featurestore: str, entity_type: str, + ) -> str: + """Return a fully-qualified entity_type string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + ) + + @staticmethod + def parse_entity_type_path(path: str) -> Dict[str, str]: + """Parse a entity_type path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def feature_path( + project: str, location: str, featurestore: str, entity_type: str, feature: str, + ) -> str: + """Return a fully-qualified feature string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + feature=feature, + ) + + @staticmethod + def parse_feature_path(path: str) -> Dict[str, str]: + """Parse a feature path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)/features/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def featurestore_path(project: str, location: str, featurestore: str,) -> str: + """Return a fully-qualified featurestore string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}".format( + project=project, location=location, featurestore=featurestore, + ) + + @staticmethod + def parse_featurestore_path(path: str) -> Dict[str, str]: + """Parse a featurestore path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)$", + path, + ) + return 
m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: 
str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, FeaturestoreServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the featurestore service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, FeaturestoreServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, FeaturestoreServiceTransport): + # transport is a FeaturestoreServiceTransport instance. 
+ if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_featurestore( + self, + request: featurestore_service.CreateFeaturestoreRequest = None, + *, + parent: str = None, + featurestore: gca_featurestore.Featurestore = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new Featurestore in a given project and + location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest): + The request object. Request message for + [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. + parent (str): + Required. The resource name of the Location to create + Featurestores. Format: + ``projects/{project}/locations/{location}'`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): + Required. The Featurestore to create. + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` + Featurestore configuration information on how the + Featurestore is configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, featurestore]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateFeaturestoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateFeaturestoreRequest): + request = featurestore_service.CreateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + def get_featurestore( + self, + request: featurestore_service.GetFeaturestoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore.Featurestore: + r"""Gets details of a single Featurestore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest): + The request object. Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. + name (str): + Required. The name of the + Featurestore resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Featurestore: + Featurestore configuration + information on how the Featurestore is + configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.GetFeaturestoreRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.GetFeaturestoreRequest): + request = featurestore_service.GetFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_featurestores( + self, + request: featurestore_service.ListFeaturestoresRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturestoresPager: + r"""Lists Featurestores in a given project and location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): + The request object. Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + parent (str): + Required. The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresPager: + Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListFeaturestoresRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListFeaturestoresRequest): + request = featurestore_service.ListFeaturestoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_featurestores] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListFeaturestoresPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_featurestore( + self, + request: featurestore_service.UpdateFeaturestoreRequest = None, + *, + featurestore: gca_featurestore.Featurestore = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates the parameters of a single Featurestore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest): + The request object. Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. + featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the Featurestore resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``display_name`` + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.max_online_serving_size`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` + Featurestore configuration information on how the + Featurestore is configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateFeaturestoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateFeaturestoreRequest): + request = featurestore_service.UpdateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if featurestore is not None: + request.featurestore = featurestore + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("featurestore.name", request.featurestore.name),) + ), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_featurestore( + self, + request: featurestore_service.DeleteFeaturestoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest): + The request object. Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. + name (str): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteFeaturestoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.DeleteFeaturestoreRequest): + request = featurestore_service.DeleteFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def create_entity_type( + self, + request: featurestore_service.CreateEntityTypeRequest = None, + *, + parent: str = None, + entity_type: gca_entity_type.EntityType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new EntityType in a given Featurestore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest): + The request object. Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. + parent (str): + Required. The resource name of the Featurestore to + create EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): + The EntityType to create. + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and + have stored information about. For example, driver is + an entity type, and driver0 is an instance of an + entity type driver. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateEntityTypeRequest): + request = featurestore_service.CreateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_entity_type.EntityType, + metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def get_entity_type( + self, + request: featurestore_service.GetEntityTypeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> entity_type.EntityType: + r"""Gets details of a single EntityType. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest): + The request object. Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. + name (str): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.GetEntityTypeRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.GetEntityTypeRequest): + request = featurestore_service.GetEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_entity_types( + self, + request: featurestore_service.ListEntityTypesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEntityTypesPager: + r"""Lists EntityTypes in a given Featurestore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): + The request object. Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + parent (str): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesPager: + Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListEntityTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListEntityTypesRequest): + request = featurestore_service.ListEntityTypesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_entity_types] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListEntityTypesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_entity_type( + self, + request: featurestore_service.UpdateEntityTypeRequest = None, + *, + entity_type: gca_entity_type.EntityType = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_entity_type.EntityType: + r"""Updates the parameters of a single EntityType. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest): + The request object. Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. + entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the EntityType resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. 
+ + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateEntityTypeRequest): + request = featurestore_service.UpdateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if entity_type is not None: + request.entity_type = entity_type + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.update_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type.name", request.entity_type.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_entity_type( + self, + request: featurestore_service.DeleteEntityTypeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest): + The request object. Request message for + [FeaturestoreService.DeleteEntityTypes][]. + name (str): + Required. The name of the EntityType to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.DeleteEntityTypeRequest): + request = featurestore_service.DeleteEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def create_feature( + self, + request: featurestore_service.CreateFeatureRequest = None, + *, + parent: str = None, + feature: gca_feature.Feature = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new Feature in a given EntityType. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest): + The request object. Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. + parent (str): + Required. The resource name of the EntityType to create + a Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature (google.cloud.aiplatform_v1beta1.types.Feature): + Required. The Feature to create. + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Feature` Feature Metadata information that describes an attribute of an entity type. + For example, apple is an entity type, and color is a + feature that describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, feature]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateFeatureRequest): + request = featurestore_service.CreateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if feature is not None: + request.feature = feature + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_feature.Feature, + metadata_type=featurestore_service.CreateFeatureOperationMetadata, + ) + + # Done; return the response. + return response + + def batch_create_features( + self, + request: featurestore_service.BatchCreateFeaturesRequest = None, + *, + parent: str = None, + requests: Sequence[featurestore_service.CreateFeatureRequest] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a batch of Features in a given EntityType. 
+ + Args: + request (google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest): + The request object. Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + parent (str): + Required. The resource name of the EntityType to create + the batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]): + Required. The request message specifying the Features to + create. All Features must be created under the same + parent EntityType. The ``parent`` field in each child + request message can be omitted. If ``parent`` is set in + a child request, then the value must match the + ``parent`` value in this request message. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesResponse` + Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.BatchCreateFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.BatchCreateFeaturesRequest): + request = featurestore_service.BatchCreateFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_create_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.BatchCreateFeaturesResponse, + metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, + ) + + # Done; return the response. + return response + + def get_feature( + self, + request: featurestore_service.GetFeatureRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> feature.Feature: + r"""Gets details of a single Feature. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetFeatureRequest): + The request object. 
Request message for
+                [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature].
+            name (str):
+                Required. The name of the Feature resource. Format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.Feature:
+                Feature Metadata information that
+                describes an attribute of an entity
+                type. For example, apple is an entity
+                type, and color is a feature that
+                describes apple.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a featurestore_service.GetFeatureRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, featurestore_service.GetFeatureRequest):
+            request = featurestore_service.GetFeatureRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_features( + self, + request: featurestore_service.ListFeaturesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturesPager: + r"""Lists Features in a given EntityType. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): + The request object. Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + parent (str): + Required. The resource name of the Location to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesPager: + Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListFeaturesRequest): + request = featurestore_service.ListFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListFeaturesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_feature( + self, + request: featurestore_service.UpdateFeatureRequest = None, + *, + feature: gca_feature.Feature = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_feature.Feature: + r"""Updates the parameters of a single Feature. 
+ + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest): + The request object. Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. + feature (google.cloud.aiplatform_v1beta1.types.Feature): + Required. The Feature's ``name`` field is used to + identify the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the Features resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([feature, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateFeatureRequest): + request = featurestore_service.UpdateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if feature is not None: + request.feature = feature + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("feature.name", request.feature.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_feature( + self, + request: featurestore_service.DeleteFeatureRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single Feature. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest): + The request object. 
Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. + name (str): + Required. The name of the Features to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, featurestore_service.DeleteFeatureRequest): + request = featurestore_service.DeleteFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def import_feature_values( + self, + request: featurestore_service.ImportFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. 
+ - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest): + The request object. Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType grouping + the Features for which values are being imported. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse` + Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ImportFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ImportFeatureValuesRequest): + request = featurestore_service.ImportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.ImportFeatureValuesResponse, + metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + def batch_read_feature_values( + self, + request: featurestore_service.BatchReadFeatureValuesRequest = None, + *, + featurestore: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Batch reads Feature values from a Featurestore. 
+ This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Args: + request (google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest): + The request object. Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + (- Next Id: 6 -) + featurestore (str): + Required. The resource name of the Featurestore from + which to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse` + Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.BatchReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.BatchReadFeatureValuesRequest): + request = featurestore_service.BatchReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.batch_read_feature_values + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("featurestore", request.featurestore),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.BatchReadFeatureValuesResponse, + metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + def export_feature_values( + self, + request: featurestore_service.ExportFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports Feature values from all the entities of a + target EntityType. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest): + The request object. Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. 
+ entity_type (str): + Required. The resource name of the EntityType from which + to export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` + Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ExportFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ExportFeatureValuesRequest): + request = featurestore_service.ExportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("entity_type", request.entity_type),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.ExportFeatureValuesResponse, + metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + def search_features( + self, + request: featurestore_service.SearchFeaturesRequest = None, + *, + location: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchFeaturesPager: + r"""Searches Features matching a query in a given + project. + + Args: + request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): + The request object. Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + location (str): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``location`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesPager: + Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([location]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.SearchFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.SearchFeaturesRequest): + request = featurestore_service.SearchFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if location is not None: + request.location = location + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.search_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("location", request.location),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchFeaturesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("FeaturestoreServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py new file mode 100644 index 0000000000..98e6d56e17 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py @@ -0,0 +1,550 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) + +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_service + + +class ListFeaturestoresPager: + """A pager for iterating through ``list_featurestores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` object, and + provides an ``__iter__`` method to iterate through its + ``featurestores`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListFeaturestores`` requests and continue to iterate + through the ``featurestores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., featurestore_service.ListFeaturestoresResponse], + request: featurestore_service.ListFeaturestoresRequest, + response: featurestore_service.ListFeaturestoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = featurestore_service.ListFeaturestoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[featurestore_service.ListFeaturestoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[featurestore.Featurestore]: + for page in self.pages: + yield from page.featurestores + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListFeaturestoresAsyncPager: + """A pager for iterating through ``list_featurestores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``featurestores`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListFeaturestores`` requests and continue to iterate + through the ``featurestores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[featurestore_service.ListFeaturestoresResponse] + ], + request: featurestore_service.ListFeaturestoresRequest, + response: featurestore_service.ListFeaturestoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListFeaturestoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[featurestore_service.ListFeaturestoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[featurestore.Featurestore]: + async def async_generator(): + async for page in self.pages: + for response in page.featurestores: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListEntityTypesPager: + """A pager for iterating through ``list_entity_types`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``entity_types`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListEntityTypes`` requests and continue to iterate + through the ``entity_types`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., featurestore_service.ListEntityTypesResponse], + request: featurestore_service.ListEntityTypesRequest, + response: featurestore_service.ListEntityTypesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListEntityTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[featurestore_service.ListEntityTypesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[entity_type.EntityType]: + for page in self.pages: + yield from page.entity_types + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListEntityTypesAsyncPager: + """A pager for iterating through ``list_entity_types`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``entity_types`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListEntityTypes`` requests and continue to iterate + through the ``entity_types`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[featurestore_service.ListEntityTypesResponse]], + request: featurestore_service.ListEntityTypesRequest, + response: featurestore_service.ListEntityTypesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = featurestore_service.ListEntityTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[featurestore_service.ListEntityTypesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[entity_type.EntityType]: + async def async_generator(): + async for page in self.pages: + for response in page.entity_types: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListFeaturesPager: + """A pager for iterating through ``list_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., featurestore_service.ListFeaturesResponse], + request: featurestore_service.ListFeaturesRequest, + response: featurestore_service.ListFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[featurestore_service.ListFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[feature.Feature]: + for page in self.pages: + yield from page.features + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListFeaturesAsyncPager: + """A pager for iterating through ``list_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., Awaitable[featurestore_service.ListFeaturesResponse]], + request: featurestore_service.ListFeaturesRequest, + response: featurestore_service.ListFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[featurestore_service.ListFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[feature.Feature]: + async def async_generator(): + async for page in self.pages: + for response in page.features: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class SearchFeaturesPager: + """A pager for iterating through ``search_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``features`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``SearchFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., featurestore_service.SearchFeaturesResponse], + request: featurestore_service.SearchFeaturesRequest, + response: featurestore_service.SearchFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = featurestore_service.SearchFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[featurestore_service.SearchFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[feature.Feature]: + for page in self.pages: + yield from page.features + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class SearchFeaturesAsyncPager: + """A pager for iterating through ``search_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[featurestore_service.SearchFeaturesResponse]], + request: featurestore_service.SearchFeaturesRequest, + response: featurestore_service.SearchFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.SearchFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[featurestore_service.SearchFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[feature.Feature]: + async def async_generator(): + async for page in self.pages: + for response in page.features: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py new file mode 100644 index 0000000000..8f1772f264 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import FeaturestoreServiceTransport +from .grpc import FeaturestoreServiceGrpcTransport +from .grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[FeaturestoreServiceTransport]] +_transport_registry["grpc"] = FeaturestoreServiceGrpcTransport +_transport_registry["grpc_asyncio"] = FeaturestoreServiceGrpcAsyncIOTransport + +__all__ = ( + "FeaturestoreServiceTransport", + "FeaturestoreServiceGrpcTransport", + "FeaturestoreServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py new file mode 100644 index 0000000000..f47c31f203 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py @@ -0,0 +1,388 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.longrunning import operations_pb2 as operations # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class FeaturestoreServiceTransport(abc.ABC): + """Abstract transport class for FeaturestoreService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=self._scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_featurestore: gapic_v1.method.wrap_method( + self.create_featurestore, default_timeout=5.0, client_info=client_info, + ), + self.get_featurestore: gapic_v1.method.wrap_method( + self.get_featurestore, default_timeout=5.0, client_info=client_info, + ), + self.list_featurestores: gapic_v1.method.wrap_method( + self.list_featurestores, default_timeout=5.0, client_info=client_info, + ), + self.update_featurestore: gapic_v1.method.wrap_method( + self.update_featurestore, default_timeout=5.0, client_info=client_info, + ), + self.delete_featurestore: gapic_v1.method.wrap_method( + self.delete_featurestore, default_timeout=5.0, client_info=client_info, + ), + self.create_entity_type: gapic_v1.method.wrap_method( + self.create_entity_type, default_timeout=5.0, client_info=client_info, + ), + self.get_entity_type: gapic_v1.method.wrap_method( + self.get_entity_type, default_timeout=5.0, client_info=client_info, + ), + self.list_entity_types: gapic_v1.method.wrap_method( + self.list_entity_types, default_timeout=5.0, client_info=client_info, + ), + self.update_entity_type: gapic_v1.method.wrap_method( + self.update_entity_type, default_timeout=5.0, client_info=client_info, + ), + self.delete_entity_type: gapic_v1.method.wrap_method( + self.delete_entity_type, default_timeout=5.0, client_info=client_info, + ), + self.create_feature: gapic_v1.method.wrap_method( + self.create_feature, default_timeout=5.0, client_info=client_info, + ), + self.batch_create_features: gapic_v1.method.wrap_method( + self.batch_create_features, + default_timeout=5.0, + client_info=client_info, + ), + self.get_feature: gapic_v1.method.wrap_method( + self.get_feature, default_timeout=5.0, client_info=client_info, + ), + self.list_features: gapic_v1.method.wrap_method( + self.list_features, default_timeout=5.0, client_info=client_info, + ), + self.update_feature: gapic_v1.method.wrap_method( + self.update_feature, default_timeout=5.0, client_info=client_info, + 
), + self.delete_feature: gapic_v1.method.wrap_method( + self.delete_feature, default_timeout=5.0, client_info=client_info, + ), + self.import_feature_values: gapic_v1.method.wrap_method( + self.import_feature_values, + default_timeout=5.0, + client_info=client_info, + ), + self.batch_read_feature_values: gapic_v1.method.wrap_method( + self.batch_read_feature_values, + default_timeout=5.0, + client_info=client_info, + ), + self.export_feature_values: gapic_v1.method.wrap_method( + self.export_feature_values, + default_timeout=None, + client_info=client_info, + ), + self.search_features: gapic_v1.method.wrap_method( + self.search_features, default_timeout=5.0, client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_featurestore( + self, + ) -> typing.Callable[ + [featurestore_service.CreateFeaturestoreRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_featurestore( + self, + ) -> typing.Callable[ + [featurestore_service.GetFeaturestoreRequest], + typing.Union[ + featurestore.Featurestore, typing.Awaitable[featurestore.Featurestore] + ], + ]: + raise NotImplementedError() + + @property + def list_featurestores( + self, + ) -> typing.Callable[ + [featurestore_service.ListFeaturestoresRequest], + typing.Union[ + featurestore_service.ListFeaturestoresResponse, + typing.Awaitable[featurestore_service.ListFeaturestoresResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_featurestore( + self, + ) -> typing.Callable[ + [featurestore_service.UpdateFeaturestoreRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_featurestore( + self, + ) -> typing.Callable[ + 
[featurestore_service.DeleteFeaturestoreRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def create_entity_type( + self, + ) -> typing.Callable[ + [featurestore_service.CreateEntityTypeRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_entity_type( + self, + ) -> typing.Callable[ + [featurestore_service.GetEntityTypeRequest], + typing.Union[entity_type.EntityType, typing.Awaitable[entity_type.EntityType]], + ]: + raise NotImplementedError() + + @property + def list_entity_types( + self, + ) -> typing.Callable[ + [featurestore_service.ListEntityTypesRequest], + typing.Union[ + featurestore_service.ListEntityTypesResponse, + typing.Awaitable[featurestore_service.ListEntityTypesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_entity_type( + self, + ) -> typing.Callable[ + [featurestore_service.UpdateEntityTypeRequest], + typing.Union[ + gca_entity_type.EntityType, typing.Awaitable[gca_entity_type.EntityType] + ], + ]: + raise NotImplementedError() + + @property + def delete_entity_type( + self, + ) -> typing.Callable[ + [featurestore_service.DeleteEntityTypeRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def create_feature( + self, + ) -> typing.Callable[ + [featurestore_service.CreateFeatureRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def batch_create_features( + self, + ) -> typing.Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_feature( + self, + ) -> typing.Callable[ + [featurestore_service.GetFeatureRequest], + 
typing.Union[feature.Feature, typing.Awaitable[feature.Feature]], + ]: + raise NotImplementedError() + + @property + def list_features( + self, + ) -> typing.Callable[ + [featurestore_service.ListFeaturesRequest], + typing.Union[ + featurestore_service.ListFeaturesResponse, + typing.Awaitable[featurestore_service.ListFeaturesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_feature( + self, + ) -> typing.Callable[ + [featurestore_service.UpdateFeatureRequest], + typing.Union[gca_feature.Feature, typing.Awaitable[gca_feature.Feature]], + ]: + raise NotImplementedError() + + @property + def delete_feature( + self, + ) -> typing.Callable[ + [featurestore_service.DeleteFeatureRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def import_feature_values( + self, + ) -> typing.Callable[ + [featurestore_service.ImportFeatureValuesRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def batch_read_feature_values( + self, + ) -> typing.Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def export_feature_values( + self, + ) -> typing.Callable[ + [featurestore_service.ExportFeatureValuesRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def search_features( + self, + ) -> typing.Callable[ + [featurestore_service.SearchFeaturesRequest], + typing.Union[ + featurestore_service.SearchFeaturesResponse, + typing.Awaitable[featurestore_service.SearchFeaturesResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("FeaturestoreServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py 
b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py new file mode 100644 index 0000000000..27c255d8a6 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py @@ -0,0 +1,830 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO + + +class FeaturestoreServiceGrpcTransport(FeaturestoreServiceTransport): + """gRPC backend transport for FeaturestoreService. 
+ + The service that handles CRUD and List for resources for + Featurestore. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. 
+ self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_featurestore( + self, + ) -> Callable[ + [featurestore_service.CreateFeaturestoreRequest], operations.Operation + ]: + r"""Return a callable for the create featurestore method over gRPC. + + Creates a new Featurestore in a given project and + location. 
+ + Returns: + Callable[[~.CreateFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_featurestore" not in self._stubs: + self._stubs["create_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore", + request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_featurestore"] + + @property + def get_featurestore( + self, + ) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], featurestore.Featurestore + ]: + r"""Return a callable for the get featurestore method over gRPC. + + Gets details of a single Featurestore. + + Returns: + Callable[[~.GetFeaturestoreRequest], + ~.Featurestore]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_featurestore" not in self._stubs: + self._stubs["get_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore", + request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, + response_deserializer=featurestore.Featurestore.deserialize, + ) + return self._stubs["get_featurestore"] + + @property + def list_featurestores( + self, + ) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + featurestore_service.ListFeaturestoresResponse, + ]: + r"""Return a callable for the list featurestores method over gRPC. + + Lists Featurestores in a given project and location. 
+ + Returns: + Callable[[~.ListFeaturestoresRequest], + ~.ListFeaturestoresResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_featurestores" not in self._stubs: + self._stubs["list_featurestores"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores", + request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, + response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, + ) + return self._stubs["list_featurestores"] + + @property + def update_featurestore( + self, + ) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], operations.Operation + ]: + r"""Return a callable for the update featurestore method over gRPC. + + Updates the parameters of a single Featurestore. + + Returns: + Callable[[~.UpdateFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_featurestore" not in self._stubs: + self._stubs["update_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore", + request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_featurestore"] + + @property + def delete_featurestore( + self, + ) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], operations.Operation + ]: + r"""Return a callable for the delete featurestore method over gRPC. 
+ + Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Returns: + Callable[[~.DeleteFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_featurestore" not in self._stubs: + self._stubs["delete_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore", + request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_featurestore"] + + @property + def create_entity_type( + self, + ) -> Callable[[featurestore_service.CreateEntityTypeRequest], operations.Operation]: + r"""Return a callable for the create entity type method over gRPC. + + Creates a new EntityType in a given Featurestore. + + Returns: + Callable[[~.CreateEntityTypeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_entity_type" not in self._stubs: + self._stubs["create_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType", + request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_entity_type"] + + @property + def get_entity_type( + self, + ) -> Callable[[featurestore_service.GetEntityTypeRequest], entity_type.EntityType]: + r"""Return a callable for the get entity type method over gRPC. + + Gets details of a single EntityType. + + Returns: + Callable[[~.GetEntityTypeRequest], + ~.EntityType]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_entity_type" not in self._stubs: + self._stubs["get_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType", + request_serializer=featurestore_service.GetEntityTypeRequest.serialize, + response_deserializer=entity_type.EntityType.deserialize, + ) + return self._stubs["get_entity_type"] + + @property + def list_entity_types( + self, + ) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + featurestore_service.ListEntityTypesResponse, + ]: + r"""Return a callable for the list entity types method over gRPC. + + Lists EntityTypes in a given Featurestore. + + Returns: + Callable[[~.ListEntityTypesRequest], + ~.ListEntityTypesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_entity_types" not in self._stubs: + self._stubs["list_entity_types"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes", + request_serializer=featurestore_service.ListEntityTypesRequest.serialize, + response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, + ) + return self._stubs["list_entity_types"] + + @property + def update_entity_type( + self, + ) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], gca_entity_type.EntityType + ]: + r"""Return a callable for the update entity type method over gRPC. + + Updates the parameters of a single EntityType. + + Returns: + Callable[[~.UpdateEntityTypeRequest], + ~.EntityType]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_entity_type" not in self._stubs: + self._stubs["update_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType", + request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, + response_deserializer=gca_entity_type.EntityType.deserialize, + ) + return self._stubs["update_entity_type"] + + @property + def delete_entity_type( + self, + ) -> Callable[[featurestore_service.DeleteEntityTypeRequest], operations.Operation]: + r"""Return a callable for the delete entity type method over gRPC. + + Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Returns: + Callable[[~.DeleteEntityTypeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_entity_type" not in self._stubs: + self._stubs["delete_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType", + request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_entity_type"] + + @property + def create_feature( + self, + ) -> Callable[[featurestore_service.CreateFeatureRequest], operations.Operation]: + r"""Return a callable for the create feature method over gRPC. + + Creates a new Feature in a given EntityType. + + Returns: + Callable[[~.CreateFeatureRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_feature" not in self._stubs: + self._stubs["create_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature", + request_serializer=featurestore_service.CreateFeatureRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_feature"] + + @property + def batch_create_features( + self, + ) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], operations.Operation + ]: + r"""Return a callable for the batch create features method over gRPC. + + Creates a batch of Features in a given EntityType. + + Returns: + Callable[[~.BatchCreateFeaturesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_create_features" not in self._stubs: + self._stubs["batch_create_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures", + request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["batch_create_features"] + + @property + def get_feature( + self, + ) -> Callable[[featurestore_service.GetFeatureRequest], feature.Feature]: + r"""Return a callable for the get feature method over gRPC. + + Gets details of a single Feature. + + Returns: + Callable[[~.GetFeatureRequest], + ~.Feature]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_feature" not in self._stubs: + self._stubs["get_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature", + request_serializer=featurestore_service.GetFeatureRequest.serialize, + response_deserializer=feature.Feature.deserialize, + ) + return self._stubs["get_feature"] + + @property + def list_features( + self, + ) -> Callable[ + [featurestore_service.ListFeaturesRequest], + featurestore_service.ListFeaturesResponse, + ]: + r"""Return a callable for the list features method over gRPC. + + Lists Features in a given EntityType. + + Returns: + Callable[[~.ListFeaturesRequest], + ~.ListFeaturesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_features" not in self._stubs: + self._stubs["list_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures", + request_serializer=featurestore_service.ListFeaturesRequest.serialize, + response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, + ) + return self._stubs["list_features"] + + @property + def update_feature( + self, + ) -> Callable[[featurestore_service.UpdateFeatureRequest], gca_feature.Feature]: + r"""Return a callable for the update feature method over gRPC. + + Updates the parameters of a single Feature. + + Returns: + Callable[[~.UpdateFeatureRequest], + ~.Feature]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_feature" not in self._stubs: + self._stubs["update_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature", + request_serializer=featurestore_service.UpdateFeatureRequest.serialize, + response_deserializer=gca_feature.Feature.deserialize, + ) + return self._stubs["update_feature"] + + @property + def delete_feature( + self, + ) -> Callable[[featurestore_service.DeleteFeatureRequest], operations.Operation]: + r"""Return a callable for the delete feature method over gRPC. + + Deletes a single Feature. + + Returns: + Callable[[~.DeleteFeatureRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_feature" not in self._stubs: + self._stubs["delete_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature", + request_serializer=featurestore_service.DeleteFeatureRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_feature"] + + @property + def import_feature_values( + self, + ) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], operations.Operation + ]: + r"""Return a callable for the import feature values method over gRPC. + + Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Returns: + Callable[[~.ImportFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "import_feature_values" not in self._stubs: + self._stubs["import_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues", + request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["import_feature_values"] + + @property + def batch_read_feature_values( + self, + ) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], operations.Operation + ]: + r"""Return a callable for the batch read feature values method over gRPC. + + Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Returns: + Callable[[~.BatchReadFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_read_feature_values" not in self._stubs: + self._stubs["batch_read_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues", + request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["batch_read_feature_values"] + + @property + def export_feature_values( + self, + ) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], operations.Operation + ]: + r"""Return a callable for the export feature values method over gRPC. + + Exports Feature values from all the entities of a + target EntityType. 
+ + Returns: + Callable[[~.ExportFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "export_feature_values" not in self._stubs: + self._stubs["export_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues", + request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_feature_values"] + + @property + def search_features( + self, + ) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + featurestore_service.SearchFeaturesResponse, + ]: + r"""Return a callable for the search features method over gRPC. + + Searches Features matching a query in a given + project. + + Returns: + Callable[[~.SearchFeaturesRequest], + ~.SearchFeaturesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "search_features" not in self._stubs: + self._stubs["search_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures", + request_serializer=featurestore_service.SearchFeaturesRequest.serialize, + response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, + ) + return self._stubs["search_features"] + + +__all__ = ("FeaturestoreServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..148ac3c1a9 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py @@ -0,0 +1,857 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import FeaturestoreServiceGrpcTransport + + +class FeaturestoreServiceGrpcAsyncIOTransport(FeaturestoreServiceTransport): + """gRPC AsyncIO backend transport for FeaturestoreService. + + The service that handles CRUD and List for resources for + Featurestore. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. 
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. 
+ return self._operations_client + + @property + def create_featurestore( + self, + ) -> Callable[ + [featurestore_service.CreateFeaturestoreRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the create featurestore method over gRPC. + + Creates a new Featurestore in a given project and + location. + + Returns: + Callable[[~.CreateFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_featurestore" not in self._stubs: + self._stubs["create_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore", + request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_featurestore"] + + @property + def get_featurestore( + self, + ) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], + Awaitable[featurestore.Featurestore], + ]: + r"""Return a callable for the get featurestore method over gRPC. + + Gets details of a single Featurestore. + + Returns: + Callable[[~.GetFeaturestoreRequest], + Awaitable[~.Featurestore]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_featurestore" not in self._stubs: + self._stubs["get_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore", + request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, + response_deserializer=featurestore.Featurestore.deserialize, + ) + return self._stubs["get_featurestore"] + + @property + def list_featurestores( + self, + ) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + Awaitable[featurestore_service.ListFeaturestoresResponse], + ]: + r"""Return a callable for the list featurestores method over gRPC. + + Lists Featurestores in a given project and location. + + Returns: + Callable[[~.ListFeaturestoresRequest], + Awaitable[~.ListFeaturestoresResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_featurestores" not in self._stubs: + self._stubs["list_featurestores"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores", + request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, + response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, + ) + return self._stubs["list_featurestores"] + + @property + def update_featurestore( + self, + ) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the update featurestore method over gRPC. + + Updates the parameters of a single Featurestore. + + Returns: + Callable[[~.UpdateFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_featurestore" not in self._stubs: + self._stubs["update_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore", + request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_featurestore"] + + @property + def delete_featurestore( + self, + ) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the delete featurestore method over gRPC. + + Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Returns: + Callable[[~.DeleteFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_featurestore" not in self._stubs: + self._stubs["delete_featurestore"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore", + request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_featurestore"] + + @property + def create_entity_type( + self, + ) -> Callable[ + [featurestore_service.CreateEntityTypeRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create entity type method over gRPC. + + Creates a new EntityType in a given Featurestore. 
+ + Returns: + Callable[[~.CreateEntityTypeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_entity_type" not in self._stubs: + self._stubs["create_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType", + request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_entity_type"] + + @property + def get_entity_type( + self, + ) -> Callable[ + [featurestore_service.GetEntityTypeRequest], Awaitable[entity_type.EntityType] + ]: + r"""Return a callable for the get entity type method over gRPC. + + Gets details of a single EntityType. + + Returns: + Callable[[~.GetEntityTypeRequest], + Awaitable[~.EntityType]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_entity_type" not in self._stubs: + self._stubs["get_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType", + request_serializer=featurestore_service.GetEntityTypeRequest.serialize, + response_deserializer=entity_type.EntityType.deserialize, + ) + return self._stubs["get_entity_type"] + + @property + def list_entity_types( + self, + ) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + Awaitable[featurestore_service.ListEntityTypesResponse], + ]: + r"""Return a callable for the list entity types method over gRPC. + + Lists EntityTypes in a given Featurestore. 
+ + Returns: + Callable[[~.ListEntityTypesRequest], + Awaitable[~.ListEntityTypesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_entity_types" not in self._stubs: + self._stubs["list_entity_types"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes", + request_serializer=featurestore_service.ListEntityTypesRequest.serialize, + response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, + ) + return self._stubs["list_entity_types"] + + @property + def update_entity_type( + self, + ) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], + Awaitable[gca_entity_type.EntityType], + ]: + r"""Return a callable for the update entity type method over gRPC. + + Updates the parameters of a single EntityType. + + Returns: + Callable[[~.UpdateEntityTypeRequest], + Awaitable[~.EntityType]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_entity_type" not in self._stubs: + self._stubs["update_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType", + request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, + response_deserializer=gca_entity_type.EntityType.deserialize, + ) + return self._stubs["update_entity_type"] + + @property + def delete_entity_type( + self, + ) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the delete entity type method over gRPC. 
+ + Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Returns: + Callable[[~.DeleteEntityTypeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_entity_type" not in self._stubs: + self._stubs["delete_entity_type"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType", + request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_entity_type"] + + @property + def create_feature( + self, + ) -> Callable[ + [featurestore_service.CreateFeatureRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create feature method over gRPC. + + Creates a new Feature in a given EntityType. + + Returns: + Callable[[~.CreateFeatureRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_feature" not in self._stubs: + self._stubs["create_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature", + request_serializer=featurestore_service.CreateFeatureRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_feature"] + + @property + def batch_create_features( + self, + ) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the batch create features method over gRPC. + + Creates a batch of Features in a given EntityType. + + Returns: + Callable[[~.BatchCreateFeaturesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_create_features" not in self._stubs: + self._stubs["batch_create_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures", + request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["batch_create_features"] + + @property + def get_feature( + self, + ) -> Callable[[featurestore_service.GetFeatureRequest], Awaitable[feature.Feature]]: + r"""Return a callable for the get feature method over gRPC. + + Gets details of a single Feature. + + Returns: + Callable[[~.GetFeatureRequest], + Awaitable[~.Feature]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_feature" not in self._stubs: + self._stubs["get_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature", + request_serializer=featurestore_service.GetFeatureRequest.serialize, + response_deserializer=feature.Feature.deserialize, + ) + return self._stubs["get_feature"] + + @property + def list_features( + self, + ) -> Callable[ + [featurestore_service.ListFeaturesRequest], + Awaitable[featurestore_service.ListFeaturesResponse], + ]: + r"""Return a callable for the list features method over gRPC. + + Lists Features in a given EntityType. + + Returns: + Callable[[~.ListFeaturesRequest], + Awaitable[~.ListFeaturesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_features" not in self._stubs: + self._stubs["list_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures", + request_serializer=featurestore_service.ListFeaturesRequest.serialize, + response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, + ) + return self._stubs["list_features"] + + @property + def update_feature( + self, + ) -> Callable[ + [featurestore_service.UpdateFeatureRequest], Awaitable[gca_feature.Feature] + ]: + r"""Return a callable for the update feature method over gRPC. + + Updates the parameters of a single Feature. + + Returns: + Callable[[~.UpdateFeatureRequest], + Awaitable[~.Feature]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_feature" not in self._stubs: + self._stubs["update_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature", + request_serializer=featurestore_service.UpdateFeatureRequest.serialize, + response_deserializer=gca_feature.Feature.deserialize, + ) + return self._stubs["update_feature"] + + @property + def delete_feature( + self, + ) -> Callable[ + [featurestore_service.DeleteFeatureRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the delete feature method over gRPC. + + Deletes a single Feature. + + Returns: + Callable[[~.DeleteFeatureRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_feature" not in self._stubs: + self._stubs["delete_feature"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature", + request_serializer=featurestore_service.DeleteFeatureRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_feature"] + + @property + def import_feature_values( + self, + ) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the import feature values method over gRPC. + + Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. 
If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Returns: + Callable[[~.ImportFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "import_feature_values" not in self._stubs: + self._stubs["import_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues", + request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["import_feature_values"] + + @property + def batch_read_feature_values( + self, + ) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the batch read feature values method over gRPC. + + Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. 
+ + Returns: + Callable[[~.BatchReadFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_read_feature_values" not in self._stubs: + self._stubs["batch_read_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues", + request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["batch_read_feature_values"] + + @property + def export_feature_values( + self, + ) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the export feature values method over gRPC. + + Exports Feature values from all the entities of a + target EntityType. + + Returns: + Callable[[~.ExportFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "export_feature_values" not in self._stubs: + self._stubs["export_feature_values"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues", + request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_feature_values"] + + @property + def search_features( + self, + ) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + Awaitable[featurestore_service.SearchFeaturesResponse], + ]: + r"""Return a callable for the search features method over gRPC. + + Searches Features matching a query in a given + project. + + Returns: + Callable[[~.SearchFeaturesRequest], + Awaitable[~.SearchFeaturesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "search_features" not in self._stubs: + self._stubs["search_features"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures", + request_serializer=featurestore_service.SearchFeaturesRequest.serialize, + response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, + ) + return self._stubs["search_features"] + + +__all__ = ("FeaturestoreServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py new file mode 100644 index 0000000000..1eeda9dcdd --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .client import IndexEndpointServiceClient +from .async_client import IndexEndpointServiceAsyncClient + +__all__ = ( + "IndexEndpointServiceClient", + "IndexEndpointServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py new file mode 100644 index 0000000000..8f2ffd8555 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -0,0 +1,829 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport +from .client import IndexEndpointServiceClient + + +class IndexEndpointServiceAsyncClient: + """A service for managing AI Platform's IndexEndpoints.""" + + _client: IndexEndpointServiceClient + + DEFAULT_ENDPOINT = IndexEndpointServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = IndexEndpointServiceClient.DEFAULT_MTLS_ENDPOINT + + index_path = staticmethod(IndexEndpointServiceClient.index_path) + parse_index_path = staticmethod(IndexEndpointServiceClient.parse_index_path) + index_endpoint_path = staticmethod(IndexEndpointServiceClient.index_endpoint_path) + parse_index_endpoint_path = 
staticmethod( + IndexEndpointServiceClient.parse_index_endpoint_path + ) + + common_billing_account_path = staticmethod( + IndexEndpointServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + IndexEndpointServiceClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(IndexEndpointServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + IndexEndpointServiceClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + IndexEndpointServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + IndexEndpointServiceClient.parse_common_organization_path + ) + + common_project_path = staticmethod(IndexEndpointServiceClient.common_project_path) + parse_common_project_path = staticmethod( + IndexEndpointServiceClient.parse_common_project_path + ) + + common_location_path = staticmethod(IndexEndpointServiceClient.common_location_path) + parse_common_location_path = staticmethod( + IndexEndpointServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceAsyncClient: The constructed client. + """ + return IndexEndpointServiceClient.from_service_account_info.__func__(IndexEndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceAsyncClient: The constructed client. + """ + return IndexEndpointServiceClient.from_service_account_file.__func__(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> IndexEndpointServiceTransport: + """Return the transport used by the client instance. + + Returns: + IndexEndpointServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(IndexEndpointServiceClient).get_transport_class, + type(IndexEndpointServiceClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, IndexEndpointServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the index endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.IndexEndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = IndexEndpointServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_index_endpoint( + self, + request: index_endpoint_service.CreateIndexEndpointRequest = None, + *, + parent: str = None, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an IndexEndpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest`): + The request object. Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + parent (:class:`str`): + Required. The resource name of the Location to create + the IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ index_endpoint (:class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint`): + Required. The IndexEndpoint to + create. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index_endpoint]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = index_endpoint_service.CreateIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if index_endpoint is not None: + request.index_endpoint = index_endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_index_endpoint.IndexEndpoint, + metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_index_endpoint( + self, + request: index_endpoint_service.GetIndexEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index_endpoint.IndexEndpoint: + r"""Gets an IndexEndpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest`): + The request object. Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] + name (:class:`str`): + Required. The name of the IndexEndpoint resource. + Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = index_endpoint_service.GetIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_index_endpoints( + self, + request: index_endpoint_service.ListIndexEndpointsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexEndpointsAsyncPager: + r"""Lists IndexEndpoints in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest`): + The request object. Request message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the IndexEndpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager: + Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = index_endpoint_service.ListIndexEndpointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_index_endpoints, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
+ response = pagers.ListIndexEndpointsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_index_endpoint( + self, + request: index_endpoint_service.UpdateIndexEndpointRequest = None, + *, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_index_endpoint.IndexEndpoint: + r"""Updates an IndexEndpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest`): + The request object. Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. + index_endpoint (:class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint`): + Required. The IndexEndpoint which + replaces the resource on the server. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. See + `FieldMask <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask>`__. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = index_endpoint_service.UpdateIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint.name", request.index_endpoint.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_index_endpoint( + self, + request: index_endpoint_service.DeleteIndexEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an IndexEndpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest`): + The request object. Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. + name (:class:`str`): + Required. The name of the IndexEndpoint resource to be + deleted. 
Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = index_endpoint_service.DeleteIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def deploy_index( + self, + request: index_endpoint_service.DeployIndexRequest = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeployIndexRequest`): + The request object. Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (:class:`google.cloud.aiplatform_v1beta1.types.DeployedIndex`): + Required. The DeployedIndex to be + created within the IndexEndpoint. + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.DeployIndexResponse` + Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = index_endpoint_service.DeployIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.deploy_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint", request.index_endpoint),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.DeployIndexResponse, + metadata_type=index_endpoint_service.DeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def undeploy_index( + self, + request: index_endpoint_service.UndeployIndexRequest = None, + *, + index_endpoint: str = None, + deployed_index_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest`): + The request object. Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource from + which to undeploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index_id (:class:`str`): + Required. The ID of the DeployedIndex + to be undeployed from the IndexEndpoint. + + This corresponds to the ``deployed_index_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexResponse` + Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = index_endpoint_service.UndeployIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index_id is not None: + request.deployed_index_id = deployed_index_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint", request.index_endpoint),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.UndeployIndexResponse, + metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, + ) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("IndexEndpointServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py new file mode 100644 index 0000000000..8c1d4626d6 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py @@ -0,0 +1,1023 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import IndexEndpointServiceGrpcTransport +from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport + + +class IndexEndpointServiceClientMeta(type): + """Metaclass for the IndexEndpointService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[IndexEndpointServiceTransport]] + _transport_registry["grpc"] = IndexEndpointServiceGrpcTransport + _transport_registry["grpc_asyncio"] = IndexEndpointServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[IndexEndpointServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class IndexEndpointServiceClient(metaclass=IndexEndpointServiceClientMeta): + """A service for managing AI Platform's IndexEndpoints.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> IndexEndpointServiceTransport: + """Return the transport used by the client instance. + + Returns: + IndexEndpointServiceTransport: The transport used by the client instance. 
+ """ + return self._transport + + @staticmethod + def index_path(project: str, location: str, index: str,) -> str: + """Return a fully-qualified index string.""" + return "projects/{project}/locations/{location}/indexes/{index}".format( + project=project, location=location, index=index, + ) + + @staticmethod + def parse_index_path(path: str) -> Dict[str, str]: + """Parse a index path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/indexes/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def index_endpoint_path(project: str, location: str, index_endpoint: str,) -> str: + """Return a fully-qualified index_endpoint string.""" + return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format( + project=project, location=location, index_endpoint=index_endpoint, + ) + + @staticmethod + def parse_index_endpoint_path(path: str) -> Dict[str, str]: + """Parse a index_endpoint path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/indexEndpoints/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + 
@staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, IndexEndpointServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the index endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, IndexEndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, IndexEndpointServiceTransport): + # transport is a IndexEndpointServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_index_endpoint( + self, + request: index_endpoint_service.CreateIndexEndpointRequest = None, + *, + parent: str = None, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an IndexEndpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest): + The request object. Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + parent (str): + Required. The resource name of the Location to create + the IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint to + create. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index_endpoint]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.CreateIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.CreateIndexEndpointRequest): + request = index_endpoint_service.CreateIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if index_endpoint is not None: + request.index_endpoint = index_endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index_endpoint.IndexEndpoint, + metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def get_index_endpoint( + self, + request: index_endpoint_service.GetIndexEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index_endpoint.IndexEndpoint: + r"""Gets an IndexEndpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest): + The request object. Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] + name (str): + Required. The name of the IndexEndpoint resource. + Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.GetIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, index_endpoint_service.GetIndexEndpointRequest): + request = index_endpoint_service.GetIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_index_endpoints( + self, + request: index_endpoint_service.ListIndexEndpointsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexEndpointsPager: + r"""Lists IndexEndpoints in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): + The request object. Request message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + parent (str): + Required. The resource name of the Location from which + to list the IndexEndpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsPager: + Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.ListIndexEndpointsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.ListIndexEndpointsRequest): + request = index_endpoint_service.ListIndexEndpointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_index_endpoints] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListIndexEndpointsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_index_endpoint( + self, + request: index_endpoint_service.UpdateIndexEndpointRequest = None, + *, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_index_endpoint.IndexEndpoint: + r"""Updates an IndexEndpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest): + The request object. Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint which + replaces the resource on the server. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. See + `FieldMask <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask>`__. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([index_endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.UpdateIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.UpdateIndexEndpointRequest): + request = index_endpoint_service.UpdateIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint.name", request.index_endpoint.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_index_endpoint( + self, + request: index_endpoint_service.DeleteIndexEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an IndexEndpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest): + The request object. Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. 
+ name (str): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.DeleteIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.DeleteIndexEndpointRequest): + request = index_endpoint_service.DeleteIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def deploy_index( + self, + request: index_endpoint_service.DeployIndexRequest = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeployIndexRequest): + The request object. Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be + created within the IndexEndpoint. 
+ + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.DeployIndexResponse` + Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.DeployIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.DeployIndexRequest): + request = index_endpoint_service.DeployIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.deploy_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint", request.index_endpoint),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.DeployIndexResponse, + metadata_type=index_endpoint_service.DeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def undeploy_index( + self, + request: index_endpoint_service.UndeployIndexRequest = None, + *, + index_endpoint: str = None, + deployed_index_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest): + The request object. Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource from + which to undeploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index_id (str): + Required. The ID of the DeployedIndex + to be undeployed from the IndexEndpoint. + + This corresponds to the ``deployed_index_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexResponse` + Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.UndeployIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.UndeployIndexRequest): + request = index_endpoint_service.UndeployIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index_id is not None: + request.deployed_index_id = deployed_index_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.undeploy_index] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint", request.index_endpoint),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.UndeployIndexResponse, + metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("IndexEndpointServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py new file mode 100644 index 0000000000..ae7b2cdbf9 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) + +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service + + +class ListIndexEndpointsPager: + """A pager for iterating through ``list_index_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``index_endpoints`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListIndexEndpoints`` requests and continue to iterate + through the ``index_endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., index_endpoint_service.ListIndexEndpointsResponse], + request: index_endpoint_service.ListIndexEndpointsRequest, + response: index_endpoint_service.ListIndexEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = index_endpoint_service.ListIndexEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[index_endpoint_service.ListIndexEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[index_endpoint.IndexEndpoint]: + for page in self.pages: + yield from page.index_endpoints + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListIndexEndpointsAsyncPager: + """A pager for iterating through ``list_index_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``index_endpoints`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListIndexEndpoints`` requests and continue to iterate + through the ``index_endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[index_endpoint_service.ListIndexEndpointsResponse] + ], + request: index_endpoint_service.ListIndexEndpointsRequest, + response: index_endpoint_service.ListIndexEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = index_endpoint_service.ListIndexEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[index_endpoint_service.ListIndexEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[index_endpoint.IndexEndpoint]: + async def async_generator(): + async for page in self.pages: + for response in page.index_endpoints: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py new file mode 100644 index 0000000000..9ce68726cf --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import IndexEndpointServiceTransport +from .grpc import IndexEndpointServiceGrpcTransport +from .grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[IndexEndpointServiceTransport]] +_transport_registry["grpc"] = IndexEndpointServiceGrpcTransport +_transport_registry["grpc_asyncio"] = IndexEndpointServiceGrpcAsyncIOTransport + +__all__ = ( + "IndexEndpointServiceTransport", + "IndexEndpointServiceGrpcTransport", + "IndexEndpointServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py new file mode 100644 index 0000000000..4f73f79d73 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.longrunning import operations_pb2 as operations # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class IndexEndpointServiceTransport(abc.ABC): + """Abstract transport class for IndexEndpointService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=self._scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_index_endpoint: gapic_v1.method.wrap_method( + self.create_index_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.get_index_endpoint: gapic_v1.method.wrap_method( + self.get_index_endpoint, default_timeout=5.0, client_info=client_info, + ), + self.list_index_endpoints: gapic_v1.method.wrap_method( + self.list_index_endpoints, default_timeout=5.0, client_info=client_info, + ), + self.update_index_endpoint: gapic_v1.method.wrap_method( + self.update_index_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_index_endpoint: gapic_v1.method.wrap_method( + self.delete_index_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.deploy_index: gapic_v1.method.wrap_method( + self.deploy_index, default_timeout=5.0, client_info=client_info, + ), + self.undeploy_index: gapic_v1.method.wrap_method( + self.undeploy_index, default_timeout=5.0, client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_index_endpoint( + self, + ) -> typing.Callable[ + [index_endpoint_service.CreateIndexEndpointRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_index_endpoint( + self, + ) -> typing.Callable[ + [index_endpoint_service.GetIndexEndpointRequest], + typing.Union[ + index_endpoint.IndexEndpoint, typing.Awaitable[index_endpoint.IndexEndpoint] + ], + ]: + raise NotImplementedError() + + @property + def list_index_endpoints( + self, + ) -> typing.Callable[ + [index_endpoint_service.ListIndexEndpointsRequest], + typing.Union[ + index_endpoint_service.ListIndexEndpointsResponse, + typing.Awaitable[index_endpoint_service.ListIndexEndpointsResponse], + ], + ]: + raise NotImplementedError() + + @property + def 
update_index_endpoint( + self, + ) -> typing.Callable[ + [index_endpoint_service.UpdateIndexEndpointRequest], + typing.Union[ + gca_index_endpoint.IndexEndpoint, + typing.Awaitable[gca_index_endpoint.IndexEndpoint], + ], + ]: + raise NotImplementedError() + + @property + def delete_index_endpoint( + self, + ) -> typing.Callable[ + [index_endpoint_service.DeleteIndexEndpointRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def deploy_index( + self, + ) -> typing.Callable[ + [index_endpoint_service.DeployIndexRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def undeploy_index( + self, + ) -> typing.Callable[ + [index_endpoint_service.UndeployIndexRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + +__all__ = ("IndexEndpointServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py new file mode 100644 index 0000000000..a41e483a61 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py @@ -0,0 +1,442 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO + + +class IndexEndpointServiceGrpcTransport(IndexEndpointServiceTransport): + """gRPC backend transport for IndexEndpointService. + + A service for managing AI Platform's IndexEndpoints. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", 
-1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. 
+ """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_index_endpoint( + self, + ) -> Callable[ + [index_endpoint_service.CreateIndexEndpointRequest], operations.Operation + ]: + r"""Return a callable for the create index endpoint method over gRPC. + + Creates an IndexEndpoint. + + Returns: + Callable[[~.CreateIndexEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_index_endpoint" not in self._stubs: + self._stubs["create_index_endpoint"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint", + request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_index_endpoint"] + + @property + def get_index_endpoint( + self, + ) -> Callable[ + [index_endpoint_service.GetIndexEndpointRequest], index_endpoint.IndexEndpoint + ]: + r"""Return a callable for the get index endpoint method over gRPC. + + Gets an IndexEndpoint. + + Returns: + Callable[[~.GetIndexEndpointRequest], + ~.IndexEndpoint]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_index_endpoint" not in self._stubs: + self._stubs["get_index_endpoint"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint", + request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, + response_deserializer=index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs["get_index_endpoint"] + + @property + def list_index_endpoints( + self, + ) -> Callable[ + [index_endpoint_service.ListIndexEndpointsRequest], + index_endpoint_service.ListIndexEndpointsResponse, + ]: + r"""Return a callable for the list index endpoints method over gRPC. + + Lists IndexEndpoints in a Location. + + Returns: + Callable[[~.ListIndexEndpointsRequest], + ~.ListIndexEndpointsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_index_endpoints" not in self._stubs: + self._stubs["list_index_endpoints"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints", + request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, + response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, + ) + return self._stubs["list_index_endpoints"] + + @property + def update_index_endpoint( + self, + ) -> Callable[ + [index_endpoint_service.UpdateIndexEndpointRequest], + gca_index_endpoint.IndexEndpoint, + ]: + r"""Return a callable for the update index endpoint method over gRPC. + + Updates an IndexEndpoint. 
+ + Returns: + Callable[[~.UpdateIndexEndpointRequest], + ~.IndexEndpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_index_endpoint" not in self._stubs: + self._stubs["update_index_endpoint"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint", + request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, + response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs["update_index_endpoint"] + + @property + def delete_index_endpoint( + self, + ) -> Callable[ + [index_endpoint_service.DeleteIndexEndpointRequest], operations.Operation + ]: + r"""Return a callable for the delete index endpoint method over gRPC. + + Deletes an IndexEndpoint. + + Returns: + Callable[[~.DeleteIndexEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_index_endpoint" not in self._stubs: + self._stubs["delete_index_endpoint"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint", + request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_index_endpoint"] + + @property + def deploy_index( + self, + ) -> Callable[[index_endpoint_service.DeployIndexRequest], operations.Operation]: + r"""Return a callable for the deploy index method over gRPC. 
+ + Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Returns: + Callable[[~.DeployIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "deploy_index" not in self._stubs: + self._stubs["deploy_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex", + request_serializer=index_endpoint_service.DeployIndexRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["deploy_index"] + + @property + def undeploy_index( + self, + ) -> Callable[[index_endpoint_service.UndeployIndexRequest], operations.Operation]: + r"""Return a callable for the undeploy index method over gRPC. + + Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "undeploy_index" not in self._stubs: + self._stubs["undeploy_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex", + request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["undeploy_index"] + + +__all__ = ("IndexEndpointServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..a34337a84f --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -0,0 +1,455 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#

import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple

from google.api_core import gapic_v1  # type: ignore
from google.api_core import grpc_helpers_async  # type: ignore
from google.api_core import operations_v1  # type: ignore
from google import auth  # type: ignore
from google.auth import credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore

import grpc  # type: ignore
from grpc.experimental import aio  # type: ignore

from google.cloud.aiplatform_v1beta1.types import index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
from google.longrunning import operations_pb2 as operations  # type: ignore

from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import IndexEndpointServiceGrpcTransport


class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport):
    """gRPC AsyncIO backend transport for IndexEndpointService.

    A service for managing AI Platform's IndexEndpoints.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Class-level declarations only; both are replaced with per-instance
    # values inside ``__init__`` (the stub cache must never be shared
    # across transport instances).
    _grpc_channel: aio.Channel
    _stubs: Dict[str, Callable] = {}

    @classmethod
    def create_channel(
        cls,
        host: str = "aiplatform.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        # Fall back to the service's default OAuth scopes when none given.
        scopes = scopes or cls.AUTH_SCOPES
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            **kwargs,
        )

    def __init__(
        self,
        *,
        host: str = "aiplatform.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        # Per-instance stub cache; shadows the class-level default above.
        self._stubs: Dict[str, Callable] = {}
        # Lazily created by the ``operations_client`` property.
        self._operations_client = None

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            # NOTE: ``False`` (not ``None``) tells the base transport to skip
            # credential resolution entirely for a caller-supplied channel.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    # Unlimited message sizes: AI Platform payloads (e.g.
                    # index contents) can exceed gRPC's 4 MiB default.
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.

        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel

    @property
    def operations_client(self) -> operations_v1.OperationsAsyncClient:
        """Create the client designed to process long-running operations.

        This property caches on the instance; repeated calls return the same
        client.
        """
        # Sanity check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsAsyncClient(
                self.grpc_channel
            )

        # Return the client from cache.
        return self._operations_client

    @property
    def create_index_endpoint(
        self,
    ) -> Callable[
        [index_endpoint_service.CreateIndexEndpointRequest],
        Awaitable[operations.Operation],
    ]:
        r"""Return a callable for the create index endpoint method over gRPC.

        Creates an IndexEndpoint.

        Returns:
            Callable[[~.CreateIndexEndpointRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_index_endpoint" not in self._stubs:
            self._stubs["create_index_endpoint"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint",
                request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize,
                response_deserializer=operations.Operation.FromString,
            )
        return self._stubs["create_index_endpoint"]

    @property
    def get_index_endpoint(
        self,
    ) -> Callable[
        [index_endpoint_service.GetIndexEndpointRequest],
        Awaitable[index_endpoint.IndexEndpoint],
    ]:
        r"""Return a callable for the get index endpoint method over gRPC.

        Gets an IndexEndpoint.

        Returns:
            Callable[[~.GetIndexEndpointRequest],
                    Awaitable[~.IndexEndpoint]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_index_endpoint" not in self._stubs:
            self._stubs["get_index_endpoint"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint",
                request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize,
                response_deserializer=index_endpoint.IndexEndpoint.deserialize,
            )
        return self._stubs["get_index_endpoint"]

    @property
    def list_index_endpoints(
        self,
    ) -> Callable[
        [index_endpoint_service.ListIndexEndpointsRequest],
        Awaitable[index_endpoint_service.ListIndexEndpointsResponse],
    ]:
        r"""Return a callable for the list index endpoints method over gRPC.

        Lists IndexEndpoints in a Location.

        Returns:
            Callable[[~.ListIndexEndpointsRequest],
                    Awaitable[~.ListIndexEndpointsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_index_endpoints" not in self._stubs:
            self._stubs["list_index_endpoints"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints",
                request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize,
                response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize,
            )
        return self._stubs["list_index_endpoints"]

    @property
    def update_index_endpoint(
        self,
    ) -> Callable[
        [index_endpoint_service.UpdateIndexEndpointRequest],
        Awaitable[gca_index_endpoint.IndexEndpoint],
    ]:
        r"""Return a callable for the update index endpoint method over gRPC.

        Updates an IndexEndpoint.

        Returns:
            Callable[[~.UpdateIndexEndpointRequest],
                    Awaitable[~.IndexEndpoint]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_index_endpoint" not in self._stubs:
            self._stubs["update_index_endpoint"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint",
                request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize,
                response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize,
            )
        return self._stubs["update_index_endpoint"]

    @property
    def delete_index_endpoint(
        self,
    ) -> Callable[
        [index_endpoint_service.DeleteIndexEndpointRequest],
        Awaitable[operations.Operation],
    ]:
        r"""Return a callable for the delete index endpoint method over gRPC.

        Deletes an IndexEndpoint.

        Returns:
            Callable[[~.DeleteIndexEndpointRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_index_endpoint" not in self._stubs:
            self._stubs["delete_index_endpoint"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint",
                request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize,
                response_deserializer=operations.Operation.FromString,
            )
        return self._stubs["delete_index_endpoint"]

    @property
    def deploy_index(
        self,
    ) -> Callable[
        [index_endpoint_service.DeployIndexRequest], Awaitable[operations.Operation]
    ]:
        r"""Return a callable for the deploy index method over gRPC.

        Deploys an Index into this IndexEndpoint, creating a
        DeployedIndex within it.
        Only non-empty Indexes can be deployed.

        Returns:
            Callable[[~.DeployIndexRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "deploy_index" not in self._stubs:
            self._stubs["deploy_index"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex",
                request_serializer=index_endpoint_service.DeployIndexRequest.serialize,
                response_deserializer=operations.Operation.FromString,
            )
        return self._stubs["deploy_index"]

    @property
    def undeploy_index(
        self,
    ) -> Callable[
        [index_endpoint_service.UndeployIndexRequest], Awaitable[operations.Operation]
    ]:
        r"""Return a callable for the undeploy index method over gRPC.

        Undeploys an Index from an IndexEndpoint, removing a
        DeployedIndex from it, and freeing all resources it's
        using.

        Returns:
            Callable[[~.UndeployIndexRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "undeploy_index" not in self._stubs:
            self._stubs["undeploy_index"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex",
                request_serializer=index_endpoint_service.UndeployIndexRequest.serialize,
                response_deserializer=operations.Operation.FromString,
            )
        return self._stubs["undeploy_index"]


__all__ = ("IndexEndpointServiceGrpcAsyncIOTransport",)
+# + +from .client import IndexServiceClient +from .async_client import IndexServiceAsyncClient + +__all__ = ( + "IndexServiceClient", + "IndexServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py new file mode 100644 index 0000000000..0d1a875910 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py @@ -0,0 +1,640 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#

from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources

import google.api_core.client_options as ClientOptions  # type: ignore
from google.api_core import exceptions  # type: ignore
from google.api_core import gapic_v1  # type: ignore
from google.api_core import retry as retries  # type: ignore
from google.auth import credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.api_core import operation as gac_operation  # type: ignore
from google.api_core import operation_async  # type: ignore
from google.cloud.aiplatform_v1beta1.services.index_service import pagers
from google.cloud.aiplatform_v1beta1.types import deployed_index_ref
from google.cloud.aiplatform_v1beta1.types import index
from google.cloud.aiplatform_v1beta1.types import index as gca_index
from google.cloud.aiplatform_v1beta1.types import index_service
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.protobuf import empty_pb2 as empty  # type: ignore
from google.protobuf import field_mask_pb2 as field_mask  # type: ignore
from google.protobuf import struct_pb2 as struct  # type: ignore
from google.protobuf import timestamp_pb2 as timestamp  # type: ignore

from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport
from .client import IndexServiceClient


class IndexServiceAsyncClient:
    """A service for creating and managing AI Platform's Index
    resources.

    This async client is a thin wrapper that delegates all work to a
    synchronous ``IndexServiceClient`` configured with an asyncio
    (gRPC AsyncIO) transport.
    """

    _client: IndexServiceClient

    DEFAULT_ENDPOINT = IndexServiceClient.DEFAULT_ENDPOINT
    DEFAULT_MTLS_ENDPOINT = IndexServiceClient.DEFAULT_MTLS_ENDPOINT

    # Resource-path helpers are re-exported from the sync client so both
    # clients expose an identical surface.
    index_path = staticmethod(IndexServiceClient.index_path)
    parse_index_path = staticmethod(IndexServiceClient.parse_index_path)
    index_endpoint_path = staticmethod(IndexServiceClient.index_endpoint_path)
    parse_index_endpoint_path = staticmethod(
        IndexServiceClient.parse_index_endpoint_path
    )

    common_billing_account_path = staticmethod(
        IndexServiceClient.common_billing_account_path
    )
    parse_common_billing_account_path = staticmethod(
        IndexServiceClient.parse_common_billing_account_path
    )

    common_folder_path = staticmethod(IndexServiceClient.common_folder_path)
    parse_common_folder_path = staticmethod(IndexServiceClient.parse_common_folder_path)

    common_organization_path = staticmethod(IndexServiceClient.common_organization_path)
    parse_common_organization_path = staticmethod(
        IndexServiceClient.parse_common_organization_path
    )

    common_project_path = staticmethod(IndexServiceClient.common_project_path)
    parse_common_project_path = staticmethod(
        IndexServiceClient.parse_common_project_path
    )

    common_location_path = staticmethod(IndexServiceClient.common_location_path)
    parse_common_location_path = staticmethod(
        IndexServiceClient.parse_common_location_path
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            IndexServiceAsyncClient: The constructed client.
        """
        # Invoke the sync classmethod's underlying function with this async
        # class substituted for ``cls`` so the returned client is async.
        return IndexServiceClient.from_service_account_info.__func__(IndexServiceAsyncClient, info, *args, **kwargs)  # type: ignore

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            IndexServiceAsyncClient: The constructed client.
        """
        return IndexServiceClient.from_service_account_file.__func__(IndexServiceAsyncClient, filename, *args, **kwargs)  # type: ignore

    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> IndexServiceTransport:
        """Return the transport used by the client instance.

        Returns:
            IndexServiceTransport: The transport used by the client instance.
        """
        return self._client.transport

    get_transport_class = functools.partial(
        type(IndexServiceClient).get_transport_class, type(IndexServiceClient)
    )

    def __init__(
        self,
        *,
        credentials: credentials.Credentials = None,
        transport: Union[str, IndexServiceTransport] = "grpc_asyncio",
        client_options: ClientOptions = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the index service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.IndexServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (gapic_v1.client_info.ClientInfo): The client info used
                to send a user-agent string along with API requests.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # All RPC work is delegated to the wrapped sync client, which is
        # constructed here with the async (grpc_asyncio) transport.
        self._client = IndexServiceClient(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )

    async def create_index(
        self,
        request: index_service.CreateIndexRequest = None,
        *,
        parent: str = None,
        index: gca_index.Index = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation_async.AsyncOperation:
        r"""Creates an Index.

        Args:
            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateIndexRequest`):
                The request object. Request message for
                [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex].
            parent (:class:`str`):
                Required. The resource name of the Location to create
                the Index in. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            index (:class:`google.cloud.aiplatform_v1beta1.types.Index`):
                Required. The Index to create.
                This corresponds to the ``index`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that
                   allows for approximate nearest neighbor (a.k.a ANN)
                   algorithms search.

        Raises:
            ValueError: If both ``request`` and any of the flattened field
                arguments (``parent``, ``index``) are set.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, index])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = index_service.CreateIndexRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.

        if parent is not None:
            request.parent = parent
        if index is not None:
            request.index = index

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_index,
            default_timeout=5.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Wrap the response in an operation future.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            gca_index.Index,
            metadata_type=index_service.CreateIndexOperationMetadata,
        )

        # Done; return the response.
        return response

    async def get_index(
        self,
        request: index_service.GetIndexRequest = None,
        *,
        name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> index.Index:
        r"""Gets an Index.

        Args:
            request (:class:`google.cloud.aiplatform_v1beta1.types.GetIndexRequest`):
                The request object. Request message for
                [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex]
            name (:class:`str`):
                Required. The name of the Index resource. Format:
                ``projects/{project}/locations/{location}/indexes/{index}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1beta1.types.Index:
                A representation of a collection of
                database items organized in a way that
                allows for approximate nearest neighbor
                (a.k.a ANN) algorithms search.

        Raises:
            ValueError: If both ``request`` and ``name`` are set.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = index_service.GetIndexRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.

        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_index,
            default_timeout=5.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def list_indexes(
        self,
        request: index_service.ListIndexesRequest = None,
        *,
        parent: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListIndexesAsyncPager:
        r"""Lists Indexes in a Location.

        Args:
            request (:class:`google.cloud.aiplatform_v1beta1.types.ListIndexesRequest`):
                The request object. Request message for
                [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes].
            parent (:class:`str`):
                Required. The resource name of the Location from which
                to list the Indexes. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesAsyncPager:
                Response message for
                [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        Raises:
            ValueError: If both ``request`` and ``parent`` are set.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = index_service.ListIndexesRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.

        if parent is not None:
            request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_indexes,
            default_timeout=5.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListIndexesAsyncPager(
            method=rpc, request=request, response=response, metadata=metadata,
        )

        # Done; return the response.
        return response

    async def update_index(
        self,
        request: index_service.UpdateIndexRequest = None,
        *,
        index: gca_index.Index = None,
        update_mask: field_mask.FieldMask = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation_async.AsyncOperation:
        r"""Updates an Index.

        Args:
            request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest`):
                The request object. Request message for
                [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex].
            index (:class:`google.cloud.aiplatform_v1beta1.types.Index`):
                Required. The Index which updates the
                resource on the server.

                This corresponds to the ``index`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
                The update mask applies to the resource. For the
                ``FieldMask`` definition, see
                `FieldMask <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask>`__.

                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that
                   allows for approximate nearest neighbor (a.k.a ANN)
                   algorithms search.

        Raises:
            ValueError: If both ``request`` and any of the flattened field
                arguments (``index``, ``update_mask``) are set.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([index, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = index_service.UpdateIndexRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.

        if index is not None:
            request.index = index
        if update_mask is not None:
            request.update_mask = update_mask

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.update_index,
            default_timeout=5.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("index.name", request.index.name),)
            ),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Wrap the response in an operation future.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            gca_index.Index,
            metadata_type=index_service.UpdateIndexOperationMetadata,
        )

        # Done; return the response.
        return response

    async def delete_index(
        self,
        request: index_service.DeleteIndexRequest = None,
        *,
        name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation_async.AsyncOperation:
        r"""Deletes an Index. An Index can only be deleted when all its
        [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
        had been undeployed.

        Args:
            request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest`):
                The request object. Request message for
                [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex].
            name (:class:`str`):
                Required. The name of the Index resource to be deleted.
                Format:
                ``projects/{project}/locations/{location}/indexes/{index}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
                   empty messages in your APIs. A typical example is to
                   use it as the request or the response type of an API
                   method. For instance:

                      service Foo {
                         rpc Bar(google.protobuf.Empty) returns
                         (google.protobuf.Empty);

                      }

                   The JSON representation for Empty is empty JSON
                   object {}.

        Raises:
            ValueError: If both ``request`` and ``name`` are set.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = index_service.DeleteIndexRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.

        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.delete_index,
            default_timeout=5.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Wrap the response in an operation future.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            empty.Empty,
            metadata_type=gca_operation.DeleteOperationMetadata,
        )

        # Done; return the response.
        return response


# Attach the installed package version to the default client info so the
# user-agent reflects the library version; fall back to a bare ClientInfo
# when the distribution metadata is unavailable (e.g. vendored source).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-aiplatform",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = ("IndexServiceAsyncClient",)
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.index_service import pagers +from google.cloud.aiplatform_v1beta1.types import deployed_index_ref +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index as gca_index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import IndexServiceGrpcTransport +from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport + + +class IndexServiceClientMeta(type): + """Metaclass for the IndexService client. + + This provides class-level methods for building and retrieving + support objects (e.g. 
transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[IndexServiceTransport]]
+    _transport_registry["grpc"] = IndexServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = IndexServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls, label: str = None,) -> Type[IndexServiceTransport]:
+        """Return an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class IndexServiceClient(metaclass=IndexServiceClientMeta):
+    """A service for creating and managing AI Platform's Index
+    resources.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> IndexServiceTransport: + """Return the transport used by the client instance. + + Returns: + IndexServiceTransport: The transport used by the client instance. 
+        """
+        return self._transport
+
+    @staticmethod
+    def index_path(project: str, location: str, index: str,) -> str:
+        """Return a fully-qualified index string."""
+        return "projects/{project}/locations/{location}/indexes/{index}".format(
+            project=project, location=location, index=index,
+        )
+
+    @staticmethod
+    def parse_index_path(path: str) -> Dict[str, str]:
+        """Parse a index path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexes/(?P<index>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def index_endpoint_path(project: str, location: str, index_endpoint: str,) -> str:
+        """Return a fully-qualified index_endpoint string."""
+        return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(
+            project=project, location=location, index_endpoint=index_endpoint,
+        )
+
+    @staticmethod
+    def parse_index_endpoint_path(path: str) -> Dict[str, str]:
+        """Parse a index_endpoint path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexEndpoints/(?P<index_endpoint>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str,) -> str:
+        """Return a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder,)
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    
@staticmethod
+    def common_organization_path(organization: str,) -> str:
+        """Return a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization,)
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str,) -> str:
+        """Return a fully-qualified project string."""
+        return "projects/{project}".format(project=project,)
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str,) -> str:
+        """Return a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project, location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, IndexServiceTransport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the index service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+ transport (Union[str, IndexServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, IndexServiceTransport): + # transport is a IndexServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_index( + self, + request: index_service.CreateIndexRequest = None, + *, + parent: str = None, + index: gca_index.Index = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an Index. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateIndexRequest): + The request object. Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. + parent (str): + Required. The resource name of the Location to create + the Index in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index to create. + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.CreateIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.CreateIndexRequest): + request = index_service.CreateIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if index is not None: + request.index = index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index.Index, + metadata_type=index_service.CreateIndexOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def get_index( + self, + request: index_service.GetIndexRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index.Index: + r"""Gets an Index. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetIndexRequest): + The request object. Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] + name (str): + Required. The name of the Index resource. Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Index: + A representation of a collection of + database items organized in a way that + allows for approximate nearest neighbor + (a.k.a ANN) algorithms search. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.GetIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, index_service.GetIndexRequest): + request = index_service.GetIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_indexes( + self, + request: index_service.ListIndexesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexesPager: + r"""Lists Indexes in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): + The request object. Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + parent (str): + Required. The resource name of the Location from which + to list the Indexes. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesPager: + Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.ListIndexesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.ListIndexesRequest): + request = index_service.ListIndexesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_indexes] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListIndexesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+        return response
+
+    def update_index(
+        self,
+        request: index_service.UpdateIndexRequest = None,
+        *,
+        index: gca_index.Index = None,
+        update_mask: field_mask.FieldMask = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> gac_operation.Operation:
+        r"""Updates an Index.
+
+        Args:
+            request (google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest):
+                The request object. Request message for
+                [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex].
+            index (google.cloud.aiplatform_v1beta1.types.Index):
+                Required. The Index which updates the
+                resource on the server.
+
+                This corresponds to the ``index`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (google.protobuf.field_mask_pb2.FieldMask):
+                The update mask applies to the resource. For the
+                ``FieldMask`` definition, see
+                `FieldMask <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask>`__.
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that
+                   allows for approximate nearest neighbor (a.k.a ANN)
+                   algorithms search.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([index, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.UpdateIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.UpdateIndexRequest): + request = index_service.UpdateIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if index is not None: + request.index = index + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index.name", request.index.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index.Index, + metadata_type=index_service.UpdateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_index( + self, + request: index_service.DeleteIndexRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] + had been undeployed. 
+ + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest): + The request object. Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. + name (str): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.DeleteIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, index_service.DeleteIndexRequest): + request = index_service.DeleteIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("IndexServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py new file mode 100644 index 0000000000..18b3cea2f7 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) + +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index_service + + +class ListIndexesPager: + """A pager for iterating through ``list_indexes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``indexes`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListIndexes`` requests and continue to iterate + through the ``indexes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., index_service.ListIndexesResponse], + request: index_service.ListIndexesRequest, + response: index_service.ListIndexesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListIndexesResponse): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = index_service.ListIndexesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[index_service.ListIndexesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[index.Index]: + for page in self.pages: + yield from page.indexes + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListIndexesAsyncPager: + """A pager for iterating through ``list_indexes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``indexes`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListIndexes`` requests and continue to iterate + through the ``indexes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[index_service.ListIndexesResponse]], + request: index_service.ListIndexesRequest, + response: index_service.ListIndexesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListIndexesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = index_service.ListIndexesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[index_service.ListIndexesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[index.Index]: + async def async_generator(): + async for page in self.pages: + for response in page.indexes: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py new file mode 100644 index 0000000000..f9345ef29c --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import IndexServiceTransport +from .grpc import IndexServiceGrpcTransport +from .grpc_asyncio import IndexServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] +_transport_registry["grpc"] = IndexServiceGrpcTransport +_transport_registry["grpc_asyncio"] = IndexServiceGrpcAsyncIOTransport + +__all__ = ( + "IndexServiceTransport", + "IndexServiceGrpcTransport", + "IndexServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py new file mode 100644 index 0000000000..c634a71107 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.longrunning import operations_pb2 as operations # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class IndexServiceTransport(abc.ABC): + """Abstract transport class for IndexService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=self._scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_index: gapic_v1.method.wrap_method( + self.create_index, default_timeout=5.0, client_info=client_info, + ), + self.get_index: gapic_v1.method.wrap_method( + self.get_index, default_timeout=5.0, client_info=client_info, + ), + self.list_indexes: gapic_v1.method.wrap_method( + self.list_indexes, default_timeout=5.0, client_info=client_info, + ), + self.update_index: gapic_v1.method.wrap_method( + self.update_index, default_timeout=5.0, client_info=client_info, + ), + self.delete_index: gapic_v1.method.wrap_method( + self.delete_index, default_timeout=5.0, client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_index( + self, + ) -> typing.Callable[ + [index_service.CreateIndexRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_index( + self, + ) -> typing.Callable[ + [index_service.GetIndexRequest], + typing.Union[index.Index, typing.Awaitable[index.Index]], + ]: + raise NotImplementedError() + + @property + def list_indexes( + self, + ) -> typing.Callable[ + [index_service.ListIndexesRequest], + typing.Union[ + index_service.ListIndexesResponse, + typing.Awaitable[index_service.ListIndexesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_index( + self, + ) -> typing.Callable[ + [index_service.UpdateIndexRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_index( + self, + ) -> typing.Callable[ + [index_service.DeleteIndexRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + +__all__ = ("IndexServiceTransport",) diff --git 
a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py new file mode 100644 index 0000000000..4bb35d18d6 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO + + +class IndexServiceGrpcTransport(IndexServiceTransport): + """gRPC backend transport for IndexService. + + A service for creating and managing AI Platform's Index + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_index( + self, + ) -> Callable[[index_service.CreateIndexRequest], operations.Operation]: + r"""Return a callable for the create index method over gRPC. + + Creates an Index. + + Returns: + Callable[[~.CreateIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_index" not in self._stubs: + self._stubs["create_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex", + request_serializer=index_service.CreateIndexRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_index"] + + @property + def get_index(self) -> Callable[[index_service.GetIndexRequest], index.Index]: + r"""Return a callable for the get index method over gRPC. + + Gets an Index. + + Returns: + Callable[[~.GetIndexRequest], + ~.Index]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_index" not in self._stubs: + self._stubs["get_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexService/GetIndex", + request_serializer=index_service.GetIndexRequest.serialize, + response_deserializer=index.Index.deserialize, + ) + return self._stubs["get_index"] + + @property + def list_indexes( + self, + ) -> Callable[ + [index_service.ListIndexesRequest], index_service.ListIndexesResponse + ]: + r"""Return a callable for the list indexes method over gRPC. + + Lists Indexes in a Location. + + Returns: + Callable[[~.ListIndexesRequest], + ~.ListIndexesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_indexes" not in self._stubs: + self._stubs["list_indexes"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes", + request_serializer=index_service.ListIndexesRequest.serialize, + response_deserializer=index_service.ListIndexesResponse.deserialize, + ) + return self._stubs["list_indexes"] + + @property + def update_index( + self, + ) -> Callable[[index_service.UpdateIndexRequest], operations.Operation]: + r"""Return a callable for the update index method over gRPC. + + Updates an Index. + + Returns: + Callable[[~.UpdateIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_index" not in self._stubs: + self._stubs["update_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex", + request_serializer=index_service.UpdateIndexRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_index"] + + @property + def delete_index( + self, + ) -> Callable[[index_service.DeleteIndexRequest], operations.Operation]: + r"""Return a callable for the delete index method over gRPC. + + Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] + had been undeployed. + + Returns: + Callable[[~.DeleteIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_index" not in self._stubs: + self._stubs["delete_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex", + request_serializer=index_service.DeleteIndexRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_index"] + + +__all__ = ("IndexServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..cbcf84110e --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import IndexServiceGrpcTransport + + +class IndexServiceGrpcAsyncIOTransport(IndexServiceTransport): + """gRPC AsyncIO backend transport for IndexService. + + A service for creating and managing AI Platform's Index + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_index( + self, + ) -> Callable[[index_service.CreateIndexRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the create index method over gRPC. + + Creates an Index. + + Returns: + Callable[[~.CreateIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_index" not in self._stubs: + self._stubs["create_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex", + request_serializer=index_service.CreateIndexRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_index"] + + @property + def get_index( + self, + ) -> Callable[[index_service.GetIndexRequest], Awaitable[index.Index]]: + r"""Return a callable for the get index method over gRPC. + + Gets an Index. 
+ + Returns: + Callable[[~.GetIndexRequest], + Awaitable[~.Index]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_index" not in self._stubs: + self._stubs["get_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexService/GetIndex", + request_serializer=index_service.GetIndexRequest.serialize, + response_deserializer=index.Index.deserialize, + ) + return self._stubs["get_index"] + + @property + def list_indexes( + self, + ) -> Callable[ + [index_service.ListIndexesRequest], Awaitable[index_service.ListIndexesResponse] + ]: + r"""Return a callable for the list indexes method over gRPC. + + Lists Indexes in a Location. + + Returns: + Callable[[~.ListIndexesRequest], + Awaitable[~.ListIndexesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_indexes" not in self._stubs: + self._stubs["list_indexes"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes", + request_serializer=index_service.ListIndexesRequest.serialize, + response_deserializer=index_service.ListIndexesResponse.deserialize, + ) + return self._stubs["list_indexes"] + + @property + def update_index( + self, + ) -> Callable[[index_service.UpdateIndexRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the update index method over gRPC. + + Updates an Index. + + Returns: + Callable[[~.UpdateIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_index" not in self._stubs: + self._stubs["update_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex", + request_serializer=index_service.UpdateIndexRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_index"] + + @property + def delete_index( + self, + ) -> Callable[[index_service.DeleteIndexRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the delete index method over gRPC. + + Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] + had been undeployed. + + Returns: + Callable[[~.DeleteIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_index" not in self._stubs: + self._stubs["delete_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex", + request_serializer=index_service.DeleteIndexRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_index"] + + +__all__ = ("IndexServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index 366cbf0f52..e736d5de17 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.job_service import pagers from google.cloud.aiplatform_v1beta1.types import batch_prediction_job @@ -48,13 +48,21 @@ from google.cloud.aiplatform_v1beta1.types import ( hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) +from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import job_service from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import machine_resources from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_deployment_monitoring_job as gca_model_deployment_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import model_monitoring from google.cloud.aiplatform_v1beta1.types import operation as 
gca_operation from google.cloud.aiplatform_v1beta1.types import study +from google.protobuf import duration_pb2 as duration # type: ignore from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore from google.protobuf import struct_pb2 as struct # type: ignore from google.protobuf import timestamp_pb2 as timestamp # type: ignore from google.rpc import status_pb2 as status # type: ignore @@ -85,6 +93,8 @@ class JobServiceAsyncClient: ) dataset_path = staticmethod(JobServiceClient.dataset_path) parse_dataset_path = staticmethod(JobServiceClient.parse_dataset_path) + endpoint_path = staticmethod(JobServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(JobServiceClient.parse_endpoint_path) hyperparameter_tuning_job_path = staticmethod( JobServiceClient.hyperparameter_tuning_job_path ) @@ -93,6 +103,16 @@ class JobServiceAsyncClient: ) model_path = staticmethod(JobServiceClient.model_path) parse_model_path = staticmethod(JobServiceClient.parse_model_path) + model_deployment_monitoring_job_path = staticmethod( + JobServiceClient.model_deployment_monitoring_job_path + ) + parse_model_deployment_monitoring_job_path = staticmethod( + JobServiceClient.parse_model_deployment_monitoring_job_path + ) + network_path = staticmethod(JobServiceClient.network_path) + parse_network_path = staticmethod(JobServiceClient.parse_network_path) + tensorboard_path = staticmethod(JobServiceClient.tensorboard_path) + parse_tensorboard_path = staticmethod(JobServiceClient.parse_tensorboard_path) trial_path = staticmethod(JobServiceClient.trial_path) parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) @@ -228,7 +248,7 @@ async def create_custom_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest`): The request object. Request message for - ``JobService.CreateCustomJob``. 
+ [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. parent (:class:`str`): Required. The resource name of the Location to create the CustomJob in. Format: @@ -315,7 +335,7 @@ async def get_custom_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest`): The request object. Request message for - ``JobService.GetCustomJob``. + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. name (:class:`str`): Required. The name of the CustomJob resource. Format: ``projects/{project}/locations/{location}/customJobs/{custom_job}`` @@ -394,7 +414,7 @@ async def list_custom_jobs( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest`): The request object. Request message for - ``JobService.ListCustomJobs``. + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. parent (:class:`str`): Required. The resource name of the Location to list the CustomJobs from. Format: @@ -413,7 +433,7 @@ async def list_custom_jobs( Returns: google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsAsyncPager: Response message for - ``JobService.ListCustomJobs`` + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] Iterating over this object will yield results and resolve additional pages automatically. @@ -477,7 +497,7 @@ async def delete_custom_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest`): The request object. Request message for - ``JobService.DeleteCustomJob``. + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. name (:class:`str`): Required. The name of the CustomJob resource to be deleted. Format: @@ -570,21 +590,21 @@ async def cancel_custom_job( r"""Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. 
Clients can use - ``JobService.GetCustomJob`` + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a - ``CustomJob.error`` - value with a ``google.rpc.Status.code`` of + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``CustomJob.state`` + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to ``CANCELLED``. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest`): The request object. Request message for - ``JobService.CancelCustomJob``. + [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. name (:class:`str`): Required. The name of the CustomJob to cancel. Format: ``projects/{project}/locations/{location}/customJobs/{custom_job}`` @@ -651,7 +671,7 @@ async def create_data_labeling_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest`): The request object. Request message for - [DataLabelingJobService.CreateDataLabelingJob][]. + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. parent (:class:`str`): Required. The parent of the DataLabelingJob. Format: ``projects/{project}/locations/{location}`` @@ -734,7 +754,7 @@ async def get_data_labeling_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest`): The request object. Request message for - [DataLabelingJobService.GetDataLabelingJob][]. + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. name (:class:`str`): Required. The name of the DataLabelingJob. 
Format: ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` @@ -808,7 +828,7 @@ async def list_data_labeling_jobs( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest`): The request object. Request message for - [DataLabelingJobService.ListDataLabelingJobs][]. + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. parent (:class:`str`): Required. The parent of the DataLabelingJob. Format: ``projects/{project}/locations/{location}`` @@ -826,7 +846,7 @@ async def list_data_labeling_jobs( Returns: google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsAsyncPager: Response message for - ``JobService.ListDataLabelingJobs``. + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. Iterating over this object will yield results and resolve additional pages automatically. @@ -890,7 +910,7 @@ async def delete_data_labeling_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest`): The request object. Request message for - ``JobService.DeleteDataLabelingJob``. + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. name (:class:`str`): Required. The name of the DataLabelingJob to be deleted. Format: @@ -986,7 +1006,7 @@ async def cancel_data_labeling_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest`): The request object. Request message for - [DataLabelingJobService.CancelDataLabelingJob][]. + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. name (:class:`str`): Required. The name of the DataLabelingJob. 
Format: ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` @@ -1053,7 +1073,7 @@ async def create_hyperparameter_tuning_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest`): The request object. Request message for - ``JobService.CreateHyperparameterTuningJob``. + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. parent (:class:`str`): Required. The resource name of the Location to create the HyperparameterTuningJob in. Format: @@ -1138,7 +1158,7 @@ async def get_hyperparameter_tuning_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest`): The request object. Request message for - ``JobService.GetHyperparameterTuningJob``. + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. name (:class:`str`): Required. The name of the HyperparameterTuningJob resource. Format: @@ -1214,7 +1234,7 @@ async def list_hyperparameter_tuning_jobs( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest`): The request object. Request message for - ``JobService.ListHyperparameterTuningJobs``. + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. parent (:class:`str`): Required. The resource name of the Location to list the HyperparameterTuningJobs from. Format: @@ -1233,7 +1253,7 @@ async def list_hyperparameter_tuning_jobs( Returns: google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager: Response message for - ``JobService.ListHyperparameterTuningJobs`` + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] Iterating over this object will yield results and resolve additional pages automatically. 
@@ -1297,7 +1317,7 @@ async def delete_hyperparameter_tuning_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest`): The request object. Request message for - ``JobService.DeleteHyperparameterTuningJob``. + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. name (:class:`str`): Required. The name of the HyperparameterTuningJob resource to be deleted. Format: @@ -1391,21 +1411,21 @@ async def cancel_hyperparameter_tuning_job( cancellation on the HyperparameterTuningJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetHyperparameterTuningJob`` + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the HyperparameterTuningJob is not deleted; instead it becomes a job with a - ``HyperparameterTuningJob.error`` - value with a ``google.rpc.Status.code`` of + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``HyperparameterTuningJob.state`` + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to ``CANCELLED``. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest`): The request object. Request message for - ``JobService.CancelHyperparameterTuningJob``. + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. name (:class:`str`): Required. The name of the HyperparameterTuningJob to cancel. 
Format: @@ -1474,7 +1494,7 @@ async def create_batch_prediction_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest`): The request object. Request message for - ``JobService.CreateBatchPredictionJob``. + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. parent (:class:`str`): Required. The resource name of the Location to create the BatchPredictionJob in. Format: @@ -1499,7 +1519,7 @@ async def create_batch_prediction_job( Returns: google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a ``Model`` to produce predictions + A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions on multiple [input instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If predictions for significant portion of the @@ -1561,7 +1581,7 @@ async def get_batch_prediction_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest`): The request object. Request message for - ``JobService.GetBatchPredictionJob``. + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. name (:class:`str`): Required. The name of the BatchPredictionJob resource. Format: @@ -1579,7 +1599,7 @@ async def get_batch_prediction_job( Returns: google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a ``Model`` to produce predictions + A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions on multiple [input instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If predictions for significant portion of the @@ -1639,7 +1659,7 @@ async def list_batch_prediction_jobs( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest`): The request object. Request message for - ``JobService.ListBatchPredictionJobs``. 
+ [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. parent (:class:`str`): Required. The resource name of the Location to list the BatchPredictionJobs from. Format: @@ -1658,7 +1678,7 @@ async def list_batch_prediction_jobs( Returns: google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager: Response message for - ``JobService.ListBatchPredictionJobs`` + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] Iterating over this object will yield results and resolve additional pages automatically. @@ -1723,7 +1743,7 @@ async def delete_batch_prediction_job( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest`): The request object. Request message for - ``JobService.DeleteBatchPredictionJob``. + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. name (:class:`str`): Required. The name of the BatchPredictionJob resource to be deleted. Format: @@ -1818,18 +1838,18 @@ async def cancel_batch_prediction_job( Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetBatchPredictionJob`` + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its - ``BatchPredictionJob.state`` + [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] is set to ``CANCELLED``. Any files already outputted by the job are not deleted. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest`): The request object. 
Request message for - ``JobService.CancelBatchPredictionJob``. + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. name (:class:`str`): Required. The name of the BatchPredictionJob to cancel. Format: @@ -1882,6 +1902,693 @@ async def cancel_batch_prediction_job( request, retry=retry, timeout=timeout, metadata=metadata, ) + async def create_model_deployment_monitoring_job( + self, + request: job_service.CreateModelDeploymentMonitoringJobRequest = None, + *, + parent: str = None, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest`): + The request object. Request message for + [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. + parent (:class:`str`): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob`): + Required. The + ModelDeploymentMonitoringJob to create + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_deployment_monitoring_job]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = job_service.CreateModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_model_deployment_monitoring_job, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def search_model_deployment_monitoring_stats_anomalies( + self, + request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest = None, + *, + model_deployment_monitoring_job: str = None, + deployed_model_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: + r"""Searches Model Monitoring Statistics generated within + a given time window. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest`): + The request object. Request message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + model_deployment_monitoring_job (:class:`str`): + Required. ModelDeploymentMonitoring Job resource name. + Format: + \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (:class:`str`): + Required. The DeployedModel ID of the + [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: + Response message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_model_deployment_monitoring_stats_anomalies, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ( + "model_deployment_monitoring_job", + request.model_deployment_monitoring_job, + ), + ) + ), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_model_deployment_monitoring_job( + self, + request: job_service.GetModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Gets a ModelDeploymentMonitoringJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest`): + The request object. Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. + name (:class:`str`): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = job_service.GetModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_model_deployment_monitoring_jobs( + self, + request: job_service.ListModelDeploymentMonitoringJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelDeploymentMonitoringJobsAsyncPager: + r"""Lists ModelDeploymentMonitoringJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest`): + The request object. Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + parent (:class:`str`): + Required. The parent of the + ModelDeploymentMonitoringJob. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager: + Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_deployment_monitoring_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelDeploymentMonitoringJobsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_model_deployment_monitoring_job( + self, + request: job_service.UpdateModelDeploymentMonitoringJobRequest = None, + *, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a ModelDeploymentMonitoringJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest`): + The request object. Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob`): + Required. The model monitoring + configuration which replaces the + resource on the server. + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an + endpoint. It will analyze the logged training & + prediction data to detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ( + "model_deployment_monitoring_job.name", + request.model_deployment_monitoring_job.name, + ), + ) + ), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_model_deployment_monitoring_job( + self, + request: job_service.DeleteModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a ModelDeploymentMonitoringJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest`): + The request object. Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. + name (:class:`str`): + Required. The resource name of the model monitoring job + to delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def pause_model_deployment_monitoring_job( + self, + request: job_service.PauseModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest`): + The request object. Request message for + [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. + name (:class:`str`): + Required. The resource name of the + ModelDeploymentMonitoringJob to pause. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = job_service.PauseModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.pause_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def resume_model_deployment_monitoring_job( + self, + request: job_service.ResumeModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest`): + The request object. Request message for + [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. + name (:class:`str`): + Required. The resource name of the + ModelDeploymentMonitoringJob to resume. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = job_service.ResumeModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.resume_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/google/cloud/aiplatform_v1beta1/services/job_service/client.py index 81fa0d786f..6764071e9e 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.job_service import pagers from google.cloud.aiplatform_v1beta1.types import batch_prediction_job @@ -52,13 +52,21 @@ from google.cloud.aiplatform_v1beta1.types import ( hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) +from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import job_service from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import machine_resources from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_deployment_monitoring_job as gca_model_deployment_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import model_monitoring from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import study +from google.protobuf import duration_pb2 as duration # type: ignore from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as 
field_mask # type: ignore from google.protobuf import struct_pb2 as struct # type: ignore from google.protobuf import timestamp_pb2 as timestamp # type: ignore from google.rpc import status_pb2 as status # type: ignore @@ -252,6 +260,22 @@ def parse_dataset_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def endpoint_path(project: str, location: str, endpoint: str,) -> str: + """Return a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, location=location, endpoint=endpoint, + ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str, str]: + """Parse a endpoint path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def hyperparameter_tuning_job_path( project: str, location: str, hyperparameter_tuning_job: str, @@ -288,6 +312,57 @@ def parse_model_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def model_deployment_monitoring_job_path( + project: str, location: str, model_deployment_monitoring_job: str, + ) -> str: + """Return a fully-qualified model_deployment_monitoring_job string.""" + return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format( + project=project, + location=location, + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + @staticmethod + def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str, str]: + """Parse a model_deployment_monitoring_job path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/modelDeploymentMonitoringJobs/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def network_path(project: str, network: str,) -> str: + """Return a fully-qualified network string.""" + return 
"projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str, str]: + """Parse a network path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/global/networks/(?P.+?)$", path + ) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_path(project: str, location: str, tensorboard: str,) -> str: + """Return a fully-qualified tensorboard string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format( + project=project, location=location, tensorboard=tensorboard, + ) + + @staticmethod + def parse_tensorboard_path(path: str) -> Dict[str, str]: + """Parse a tensorboard path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def trial_path(project: str, location: str, study: str, trial: str,) -> str: """Return a fully-qualified trial string.""" @@ -492,7 +567,7 @@ def create_custom_job( Args: request (google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest): The request object. Request message for - ``JobService.CreateCustomJob``. + [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. parent (str): Required. The resource name of the Location to create the CustomJob in. Format: @@ -580,7 +655,7 @@ def get_custom_job( Args: request (google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest): The request object. Request message for - ``JobService.GetCustomJob``. + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. name (str): Required. The name of the CustomJob resource. Format: ``projects/{project}/locations/{location}/customJobs/{custom_job}`` @@ -660,7 +735,7 @@ def list_custom_jobs( Args: request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): The request object. 
Request message for - ``JobService.ListCustomJobs``. + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. parent (str): Required. The resource name of the Location to list the CustomJobs from. Format: @@ -679,7 +754,7 @@ def list_custom_jobs( Returns: google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsPager: Response message for - ``JobService.ListCustomJobs`` + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] Iterating over this object will yield results and resolve additional pages automatically. @@ -738,13 +813,13 @@ def delete_custom_job( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a CustomJob. Args: request (google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest): The request object. Request message for - ``JobService.DeleteCustomJob``. + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. name (str): Required. The name of the CustomJob resource to be deleted. Format: @@ -816,7 +891,7 @@ def delete_custom_job( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -838,21 +913,21 @@ def cancel_custom_job( r"""Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetCustomJob`` + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. 
On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a - ``CustomJob.error`` - value with a ``google.rpc.Status.code`` of + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``CustomJob.state`` + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to ``CANCELLED``. Args: request (google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest): The request object. Request message for - ``JobService.CancelCustomJob``. + [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. name (str): Required. The name of the CustomJob to cancel. Format: ``projects/{project}/locations/{location}/customJobs/{custom_job}`` @@ -920,7 +995,7 @@ def create_data_labeling_job( Args: request (google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest): The request object. Request message for - [DataLabelingJobService.CreateDataLabelingJob][]. + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. parent (str): Required. The parent of the DataLabelingJob. Format: ``projects/{project}/locations/{location}`` @@ -1004,7 +1079,7 @@ def get_data_labeling_job( Args: request (google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest): The request object. Request message for - [DataLabelingJobService.GetDataLabelingJob][]. + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. name (str): Required. The name of the DataLabelingJob. Format: ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` @@ -1079,7 +1154,7 @@ def list_data_labeling_jobs( Args: request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): The request object. Request message for - [DataLabelingJobService.ListDataLabelingJobs][]. 
+ [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. parent (str): Required. The parent of the DataLabelingJob. Format: ``projects/{project}/locations/{location}`` @@ -1097,7 +1172,7 @@ def list_data_labeling_jobs( Returns: google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsPager: Response message for - ``JobService.ListDataLabelingJobs``. + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. Iterating over this object will yield results and resolve additional pages automatically. @@ -1156,13 +1231,13 @@ def delete_data_labeling_job( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a DataLabelingJob. Args: request (google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest): The request object. Request message for - ``JobService.DeleteDataLabelingJob``. + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. name (str): Required. The name of the DataLabelingJob to be deleted. Format: @@ -1234,7 +1309,7 @@ def delete_data_labeling_job( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -1259,7 +1334,7 @@ def cancel_data_labeling_job( Args: request (google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest): The request object. Request message for - [DataLabelingJobService.CancelDataLabelingJob][]. + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. name (str): Required. The name of the DataLabelingJob. 
Format: ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` @@ -1327,7 +1402,7 @@ def create_hyperparameter_tuning_job( Args: request (google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest): The request object. Request message for - ``JobService.CreateHyperparameterTuningJob``. + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. parent (str): Required. The resource name of the Location to create the HyperparameterTuningJob in. Format: @@ -1415,7 +1490,7 @@ def get_hyperparameter_tuning_job( Args: request (google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest): The request object. Request message for - ``JobService.GetHyperparameterTuningJob``. + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. name (str): Required. The name of the HyperparameterTuningJob resource. Format: @@ -1494,7 +1569,7 @@ def list_hyperparameter_tuning_jobs( Args: request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): The request object. Request message for - ``JobService.ListHyperparameterTuningJobs``. + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. parent (str): Required. The resource name of the Location to list the HyperparameterTuningJobs from. Format: @@ -1513,7 +1588,7 @@ def list_hyperparameter_tuning_jobs( Returns: google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsPager: Response message for - ``JobService.ListHyperparameterTuningJobs`` + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] Iterating over this object will yield results and resolve additional pages automatically. 
@@ -1574,13 +1649,13 @@ def delete_hyperparameter_tuning_job( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a HyperparameterTuningJob. Args: request (google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest): The request object. Request message for - ``JobService.DeleteHyperparameterTuningJob``. + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. name (str): Required. The name of the HyperparameterTuningJob resource to be deleted. Format: @@ -1654,7 +1729,7 @@ def delete_hyperparameter_tuning_job( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -1677,21 +1752,21 @@ def cancel_hyperparameter_tuning_job( cancellation on the HyperparameterTuningJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetHyperparameterTuningJob`` + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. 
On successful cancellation, the HyperparameterTuningJob is not deleted; instead it becomes a job with a - ``HyperparameterTuningJob.error`` - value with a ``google.rpc.Status.code`` of + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``HyperparameterTuningJob.state`` + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to ``CANCELLED``. Args: request (google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest): The request object. Request message for - ``JobService.CancelHyperparameterTuningJob``. + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. name (str): Required. The name of the HyperparameterTuningJob to cancel. Format: @@ -1763,7 +1838,7 @@ def create_batch_prediction_job( Args: request (google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest): The request object. Request message for - ``JobService.CreateBatchPredictionJob``. + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. parent (str): Required. The resource name of the Location to create the BatchPredictionJob in. Format: @@ -1788,7 +1863,7 @@ def create_batch_prediction_job( Returns: google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a ``Model`` to produce predictions + A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions on multiple [input instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If predictions for significant portion of the @@ -1853,7 +1928,7 @@ def get_batch_prediction_job( Args: request (google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest): The request object. 
Request message for - ``JobService.GetBatchPredictionJob``. + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. name (str): Required. The name of the BatchPredictionJob resource. Format: @@ -1871,7 +1946,7 @@ def get_batch_prediction_job( Returns: google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a ``Model`` to produce predictions + A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions on multiple [input instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If predictions for significant portion of the @@ -1932,7 +2007,7 @@ def list_batch_prediction_jobs( Args: request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): The request object. Request message for - ``JobService.ListBatchPredictionJobs``. + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. parent (str): Required. The resource name of the Location to list the BatchPredictionJobs from. Format: @@ -1951,7 +2026,7 @@ def list_batch_prediction_jobs( Returns: google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsPager: Response message for - ``JobService.ListBatchPredictionJobs`` + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] Iterating over this object will yield results and resolve additional pages automatically. @@ -2012,14 +2087,14 @@ def delete_batch_prediction_job( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a BatchPredictionJob. Can only be called on jobs that already finished. Args: request (google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest): The request object. Request message for - ``JobService.DeleteBatchPredictionJob``. 
+ [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. name (str): Required. The name of the BatchPredictionJob resource to be deleted. Format: @@ -2093,7 +2168,7 @@ def delete_batch_prediction_job( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -2117,18 +2192,18 @@ def cancel_batch_prediction_job( Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetBatchPredictionJob`` + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its - ``BatchPredictionJob.state`` + [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] is set to ``CANCELLED``. Any files already outputted by the job are not deleted. Args: request (google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest): The request object. Request message for - ``JobService.CancelBatchPredictionJob``. + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. name (str): Required. The name of the BatchPredictionJob to cancel. 
Format: @@ -2184,6 +2259,737 @@ def cancel_batch_prediction_job( request, retry=retry, timeout=timeout, metadata=metadata, ) + def create_model_deployment_monitoring_job( + self, + request: job_service.CreateModelDeploymentMonitoringJobRequest = None, + *, + parent: str = None, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest): + The request object. Request message for + [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. + parent (str): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. The + ModelDeploymentMonitoringJob to create + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. 
It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_deployment_monitoring_job]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, job_service.CreateModelDeploymentMonitoringJobRequest + ): + request = job_service.CreateModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = ( + model_deployment_monitoring_job + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_model_deployment_monitoring_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def search_model_deployment_monitoring_stats_anomalies( + self, + request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest = None, + *, + model_deployment_monitoring_job: str = None, + deployed_model_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager: + r"""Searches Model Monitoring Statistics generated within + a given time window. + + Args: + request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + The request object. Request message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + model_deployment_monitoring_job (str): + Required. ModelDeploymentMonitoring Job resource name. + Format: + \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (str): + Required. The DeployedModel ID of the + [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager: + Response message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest + ): + request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = ( + model_deployment_monitoring_job + ) + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.search_model_deployment_monitoring_stats_anomalies + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ( + "model_deployment_monitoring_job", + request.model_deployment_monitoring_job, + ), + ) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_model_deployment_monitoring_job( + self, + request: job_service.GetModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Gets a ModelDeploymentMonitoringJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest): + The request object. Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. 
It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetModelDeploymentMonitoringJobRequest): + request = job_service.GetModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.get_model_deployment_monitoring_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_model_deployment_monitoring_jobs( + self, + request: job_service.ListModelDeploymentMonitoringJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelDeploymentMonitoringJobsPager: + r"""Lists ModelDeploymentMonitoringJobs in a Location. 
+ + Args: + request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): + The request object. Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + parent (str): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager: + Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListModelDeploymentMonitoringJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance( + request, job_service.ListModelDeploymentMonitoringJobsRequest + ): + request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_model_deployment_monitoring_jobs + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelDeploymentMonitoringJobsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_model_deployment_monitoring_job( + self, + request: job_service.UpdateModelDeploymentMonitoringJobRequest = None, + *, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a ModelDeploymentMonitoringJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest): + The request object. Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. 
The model monitoring + configuration which replaces the + resource on the server. + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an + endpoint. It will analyze the logged training & + prediction data to detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.UpdateModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance( + request, job_service.UpdateModelDeploymentMonitoringJobRequest + ): + request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = ( + model_deployment_monitoring_job + ) + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.update_model_deployment_monitoring_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ( + "model_deployment_monitoring_job.name", + request.model_deployment_monitoring_job.name, + ), + ) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_model_deployment_monitoring_job( + self, + request: job_service.DeleteModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a ModelDeploymentMonitoringJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest): + The request object. 
Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the model monitoring job + to delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance( + request, job_service.DeleteModelDeploymentMonitoringJobRequest + ): + request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_model_deployment_monitoring_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def pause_model_deployment_monitoring_job( + self, + request: job_service.PauseModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + Args: + request (google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest): + The request object. Request message for + [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to pause. 
Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.PauseModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, job_service.PauseModelDeploymentMonitoringJobRequest + ): + request = job_service.PauseModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.pause_model_deployment_monitoring_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def resume_model_deployment_monitoring_job( + self, + request: job_service.ResumeModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest): + The request object. Request message for + [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to resume. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ResumeModelDeploymentMonitoringJobRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, job_service.ResumeModelDeploymentMonitoringJobRequest + ): + request = job_service.ResumeModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.resume_model_deployment_monitoring_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py index 6c3da33d0a..2ccecac0eb 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py @@ -31,6 +31,10 @@ from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_deployment_monitoring_job as gca_model_deployment_monitoring_job, +) class ListCustomJobsPager: @@ -549,3 +553,290 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class SearchModelDeploymentMonitoringStatsAnomaliesPager: + """A pager for iterating through 
``search_model_deployment_monitoring_stats_anomalies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``monitoring_stats`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate + through the ``monitoring_stats`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse + ], + request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, + response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__( + self, + ) -> Iterable[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: + for page in self.pages: + yield from page.monitoring_stats + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: + """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``monitoring_stats`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate + through the ``monitoring_stats`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., + Awaitable[ + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse + ], + ], + request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, + response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[ + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse + ]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__( + self, + ) -> AsyncIterable[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies + ]: + async def async_generator(): + async for page in self.pages: + for response in page.monitoring_stats: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelDeploymentMonitoringJobsPager: + """A pager for iterating through 
``list_model_deployment_monitoring_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_deployment_monitoring_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate + through the ``model_deployment_monitoring_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., job_service.ListModelDeploymentMonitoringJobsResponse], + request: job_service.ListModelDeploymentMonitoringJobsRequest, + response: job_service.ListModelDeploymentMonitoringJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.ListModelDeploymentMonitoringJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__( + self, + ) -> Iterable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + for page in self.pages: + yield from page.model_deployment_monitoring_jobs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelDeploymentMonitoringJobsAsyncPager: + """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_deployment_monitoring_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate + through the ``model_deployment_monitoring_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse] + ], + request: job_service.ListModelDeploymentMonitoringJobsRequest, + response: job_service.ListModelDeploymentMonitoringJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[job_service.ListModelDeploymentMonitoringJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__( + self, + ) -> AsyncIterable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + async def async_generator(): + async for page in self.pages: + for response in page.model_deployment_monitoring_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py index 3d1f0be59b..fbe6938185 100644 --- 
a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py @@ -41,6 +41,10 @@ hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_deployment_monitoring_job as gca_model_deployment_monitoring_job, +) from google.longrunning import operations_pb2 as operations # type: ignore from google.protobuf import empty_pb2 as empty # type: ignore @@ -86,10 +90,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -97,6 +101,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -106,20 +113,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -213,6 +217,46 @@ def _prep_wrapped_messages(self, client_info): default_timeout=5.0, client_info=client_info, ), + self.create_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.create_model_deployment_monitoring_job, + default_timeout=60.0, + client_info=client_info, + ), + self.search_model_deployment_monitoring_stats_anomalies: gapic_v1.method.wrap_method( + self.search_model_deployment_monitoring_stats_anomalies, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.get_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_deployment_monitoring_jobs: gapic_v1.method.wrap_method( + self.list_model_deployment_monitoring_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.update_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.update_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.delete_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + self.pause_model_deployment_monitoring_job: 
gapic_v1.method.wrap_method( + self.pause_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + self.resume_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.resume_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), } @property @@ -432,5 +476,95 @@ def cancel_batch_prediction_job( ]: raise NotImplementedError() + @property + def create_model_deployment_monitoring_job( + self, + ) -> typing.Callable[ + [job_service.CreateModelDeploymentMonitoringJobRequest], + typing.Union[ + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + typing.Awaitable[ + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob + ], + ], + ]: + raise NotImplementedError() + + @property + def search_model_deployment_monitoring_stats_anomalies( + self, + ) -> typing.Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + typing.Union[ + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + typing.Awaitable[ + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse + ], + ], + ]: + raise NotImplementedError() + + @property + def get_model_deployment_monitoring_job( + self, + ) -> typing.Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + typing.Union[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + typing.Awaitable[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob + ], + ], + ]: + raise NotImplementedError() + + @property + def list_model_deployment_monitoring_jobs( + self, + ) -> typing.Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + typing.Union[ + job_service.ListModelDeploymentMonitoringJobsResponse, + typing.Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_model_deployment_monitoring_job( + self, + ) -> typing.Callable[ + 
[job_service.UpdateModelDeploymentMonitoringJobRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_model_deployment_monitoring_job( + self, + ) -> typing.Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def pause_model_deployment_monitoring_job( + self, + ) -> typing.Callable[ + [job_service.PauseModelDeploymentMonitoringJobRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def resume_model_deployment_monitoring_job( + self, + ) -> typing.Callable[ + [job_service.ResumeModelDeploymentMonitoringJobRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + __all__ = ("JobServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py index 763f510e5b..50a54d468f 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py @@ -42,6 +42,10 @@ hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_deployment_monitoring_job as gca_model_deployment_monitoring_job, +) from google.longrunning import operations_pb2 as operations # type: ignore from google.protobuf import empty_pb2 as empty # type: ignore @@ -123,7 +127,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -131,70 +138,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -202,18 +189,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -227,7 +204,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -262,7 +239,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property @@ -395,15 +373,15 @@ def cancel_custom_job( Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetCustomJob`` + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a - ``CustomJob.error`` - value with a ``google.rpc.Status.code`` of + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``CustomJob.state`` + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to ``CANCELLED``. Returns: @@ -700,15 +678,15 @@ def cancel_hyperparameter_tuning_job( cancellation on the HyperparameterTuningJob. The server makes a best effort to cancel the job, but success is not guaranteed. 
Clients can use - ``JobService.GetHyperparameterTuningJob`` + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the HyperparameterTuningJob is not deleted; instead it becomes a job with a - ``HyperparameterTuningJob.error`` - value with a ``google.rpc.Status.code`` of + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``HyperparameterTuningJob.state`` + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to ``CANCELLED``. Returns: @@ -857,11 +835,11 @@ def cancel_batch_prediction_job( Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetBatchPredictionJob`` + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its - ``BatchPredictionJob.state`` + [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] is set to ``CANCELLED``. Any files already outputted by the job are not deleted. 
@@ -883,5 +861,260 @@ def cancel_batch_prediction_job( ) return self._stubs["cancel_batch_prediction_job"] + @property + def create_model_deployment_monitoring_job( + self, + ) -> Callable[ + [job_service.CreateModelDeploymentMonitoringJobRequest], + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ]: + r"""Return a callable for the create model deployment + monitoring job method over gRPC. + + Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + Returns: + Callable[[~.CreateModelDeploymentMonitoringJobRequest], + ~.ModelDeploymentMonitoringJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "create_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/CreateModelDeploymentMonitoringJob", + request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs["create_model_deployment_monitoring_job"] + + @property + def search_model_deployment_monitoring_stats_anomalies( + self, + ) -> Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + ]: + r"""Return a callable for the search model deployment + monitoring stats anomalies method over gRPC. + + Searches Model Monitoring Statistics generated within + a given time window. 
+ + Returns: + Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + ~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "search_model_deployment_monitoring_stats_anomalies" not in self._stubs: + self._stubs[ + "search_model_deployment_monitoring_stats_anomalies" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/SearchModelDeploymentMonitoringStatsAnomalies", + request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, + response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + ) + return self._stubs["search_model_deployment_monitoring_stats_anomalies"] + + @property + def get_model_deployment_monitoring_job( + self, + ) -> Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ]: + r"""Return a callable for the get model deployment + monitoring job method over gRPC. + + Gets a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.GetModelDeploymentMonitoringJobRequest], + ~.ModelDeploymentMonitoringJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "get_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/GetModelDeploymentMonitoringJob", + request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs["get_model_deployment_monitoring_job"] + + @property + def list_model_deployment_monitoring_jobs( + self, + ) -> Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + job_service.ListModelDeploymentMonitoringJobsResponse, + ]: + r"""Return a callable for the list model deployment + monitoring jobs method over gRPC. + + Lists ModelDeploymentMonitoringJobs in a Location. + + Returns: + Callable[[~.ListModelDeploymentMonitoringJobsRequest], + ~.ListModelDeploymentMonitoringJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_model_deployment_monitoring_jobs" not in self._stubs: + self._stubs[ + "list_model_deployment_monitoring_jobs" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/ListModelDeploymentMonitoringJobs", + request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, + response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + ) + return self._stubs["list_model_deployment_monitoring_jobs"] + + @property + def update_model_deployment_monitoring_job( + self, + ) -> Callable[ + [job_service.UpdateModelDeploymentMonitoringJobRequest], operations.Operation + ]: + r"""Return a callable for the update model deployment + monitoring job method over gRPC. 
+ + Updates a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.UpdateModelDeploymentMonitoringJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "update_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/UpdateModelDeploymentMonitoringJob", + request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_model_deployment_monitoring_job"] + + @property + def delete_model_deployment_monitoring_job( + self, + ) -> Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], operations.Operation + ]: + r"""Return a callable for the delete model deployment + monitoring job method over gRPC. + + Deletes a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.DeleteModelDeploymentMonitoringJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "delete_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/DeleteModelDeploymentMonitoringJob", + request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_model_deployment_monitoring_job"] + + @property + def pause_model_deployment_monitoring_job( + self, + ) -> Callable[[job_service.PauseModelDeploymentMonitoringJobRequest], empty.Empty]: + r"""Return a callable for the pause model deployment + monitoring job method over gRPC. + + Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + Returns: + Callable[[~.PauseModelDeploymentMonitoringJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "pause_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "pause_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/PauseModelDeploymentMonitoringJob", + request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["pause_model_deployment_monitoring_job"] + + @property + def resume_model_deployment_monitoring_job( + self, + ) -> Callable[[job_service.ResumeModelDeploymentMonitoringJobRequest], empty.Empty]: + r"""Return a callable for the resume model deployment + monitoring job method over gRPC. 
+ + Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + Returns: + Callable[[~.ResumeModelDeploymentMonitoringJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "resume_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "resume_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/ResumeModelDeploymentMonitoringJob", + request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["resume_model_deployment_monitoring_job"] + __all__ = ("JobServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py index 07655ba262..b16a2c7cc7 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py @@ -43,6 +43,10 @@ hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_deployment_monitoring_job as gca_model_deployment_monitoring_job, +) from google.longrunning import operations_pb2 as operations # type: ignore from google.protobuf import empty_pb2 as empty # type: ignore @@ -78,7 +82,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. 
Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -156,10 +160,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -168,7 +172,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -176,70 +183,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -247,18 +234,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: @@ -407,15 +384,15 @@ def cancel_custom_job( Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetCustomJob`` + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a - ``CustomJob.error`` - value with a ``google.rpc.Status.code`` of + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``CustomJob.state`` + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to ``CANCELLED``. Returns: @@ -718,15 +695,15 @@ def cancel_hyperparameter_tuning_job( cancellation on the HyperparameterTuningJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetHyperparameterTuningJob`` + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. 
On successful cancellation, the HyperparameterTuningJob is not deleted; instead it becomes a job with a - ``HyperparameterTuningJob.error`` - value with a ``google.rpc.Status.code`` of + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``HyperparameterTuningJob.state`` + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to ``CANCELLED``. Returns: @@ -879,11 +856,11 @@ def cancel_batch_prediction_job( Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use - ``JobService.GetBatchPredictionJob`` + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its - ``BatchPredictionJob.state`` + [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] is set to ``CANCELLED``. Any files already outputted by the job are not deleted. @@ -905,5 +882,266 @@ def cancel_batch_prediction_job( ) return self._stubs["cancel_batch_prediction_job"] + @property + def create_model_deployment_monitoring_job( + self, + ) -> Callable[ + [job_service.CreateModelDeploymentMonitoringJobRequest], + Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob], + ]: + r"""Return a callable for the create model deployment + monitoring job method over gRPC. + + Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. 
+ + Returns: + Callable[[~.CreateModelDeploymentMonitoringJobRequest], + Awaitable[~.ModelDeploymentMonitoringJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "create_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/CreateModelDeploymentMonitoringJob", + request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs["create_model_deployment_monitoring_job"] + + @property + def search_model_deployment_monitoring_stats_anomalies( + self, + ) -> Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse], + ]: + r"""Return a callable for the search model deployment + monitoring stats anomalies method over gRPC. + + Searches Model Monitoring Statistics generated within + a given time window. + + Returns: + Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + Awaitable[~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "search_model_deployment_monitoring_stats_anomalies" not in self._stubs: + self._stubs[ + "search_model_deployment_monitoring_stats_anomalies" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/SearchModelDeploymentMonitoringStatsAnomalies", + request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, + response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + ) + return self._stubs["search_model_deployment_monitoring_stats_anomalies"] + + @property + def get_model_deployment_monitoring_job( + self, + ) -> Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob], + ]: + r"""Return a callable for the get model deployment + monitoring job method over gRPC. + + Gets a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.GetModelDeploymentMonitoringJobRequest], + Awaitable[~.ModelDeploymentMonitoringJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "get_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/GetModelDeploymentMonitoringJob", + request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs["get_model_deployment_monitoring_job"] + + @property + def list_model_deployment_monitoring_jobs( + self, + ) -> Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse], + ]: + r"""Return a callable for the list model deployment + monitoring jobs method over gRPC. + + Lists ModelDeploymentMonitoringJobs in a Location. + + Returns: + Callable[[~.ListModelDeploymentMonitoringJobsRequest], + Awaitable[~.ListModelDeploymentMonitoringJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_model_deployment_monitoring_jobs" not in self._stubs: + self._stubs[ + "list_model_deployment_monitoring_jobs" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/ListModelDeploymentMonitoringJobs", + request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, + response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + ) + return self._stubs["list_model_deployment_monitoring_jobs"] + + @property + def update_model_deployment_monitoring_job( + self, + ) -> Callable[ + [job_service.UpdateModelDeploymentMonitoringJobRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the update model deployment + monitoring job method over gRPC. + + Updates a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.UpdateModelDeploymentMonitoringJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "update_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/UpdateModelDeploymentMonitoringJob", + request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_model_deployment_monitoring_job"] + + @property + def delete_model_deployment_monitoring_job( + self, + ) -> Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the delete model deployment + monitoring job method over gRPC. + + Deletes a ModelDeploymentMonitoringJob. 
+ + Returns: + Callable[[~.DeleteModelDeploymentMonitoringJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "delete_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/DeleteModelDeploymentMonitoringJob", + request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_model_deployment_monitoring_job"] + + @property + def pause_model_deployment_monitoring_job( + self, + ) -> Callable[ + [job_service.PauseModelDeploymentMonitoringJobRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the pause model deployment + monitoring job method over gRPC. + + Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + Returns: + Callable[[~.PauseModelDeploymentMonitoringJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "pause_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "pause_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/PauseModelDeploymentMonitoringJob", + request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["pause_model_deployment_monitoring_job"] + + @property + def resume_model_deployment_monitoring_job( + self, + ) -> Callable[ + [job_service.ResumeModelDeploymentMonitoringJobRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the resume model deployment + monitoring job method over gRPC. + + Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + Returns: + Callable[[~.ResumeModelDeploymentMonitoringJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "resume_model_deployment_monitoring_job" not in self._stubs: + self._stubs[ + "resume_model_deployment_monitoring_job" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.JobService/ResumeModelDeploymentMonitoringJob", + request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["resume_model_deployment_monitoring_job"] + __all__ = ("JobServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py new file mode 100644 index 0000000000..8e9c09c94d --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .client import MetadataServiceClient +from .async_client import MetadataServiceAsyncClient + +__all__ = ( + "MetadataServiceClient", + "MetadataServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py new file mode 100644 index 0000000000..42246f3130 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py @@ -0,0 +1,2479 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import 
timestamp_pb2 as timestamp # type: ignore + +from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport +from .client import MetadataServiceClient + + +class MetadataServiceAsyncClient: + """Service for reading and writing metadata entries.""" + + _client: MetadataServiceClient + + DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT + + artifact_path = staticmethod(MetadataServiceClient.artifact_path) + parse_artifact_path = staticmethod(MetadataServiceClient.parse_artifact_path) + context_path = staticmethod(MetadataServiceClient.context_path) + parse_context_path = staticmethod(MetadataServiceClient.parse_context_path) + execution_path = staticmethod(MetadataServiceClient.execution_path) + parse_execution_path = staticmethod(MetadataServiceClient.parse_execution_path) + metadata_schema_path = staticmethod(MetadataServiceClient.metadata_schema_path) + parse_metadata_schema_path = staticmethod( + MetadataServiceClient.parse_metadata_schema_path + ) + metadata_store_path = staticmethod(MetadataServiceClient.metadata_store_path) + parse_metadata_store_path = staticmethod( + MetadataServiceClient.parse_metadata_store_path + ) + + common_billing_account_path = staticmethod( + MetadataServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + MetadataServiceClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(MetadataServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + MetadataServiceClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + MetadataServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + MetadataServiceClient.parse_common_organization_path + ) + + common_project_path = staticmethod(MetadataServiceClient.common_project_path) + 
parse_common_project_path = staticmethod( + MetadataServiceClient.parse_common_project_path + ) + + common_location_path = staticmethod(MetadataServiceClient.common_location_path) + parse_common_location_path = staticmethod( + MetadataServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceAsyncClient: The constructed client. + """ + return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceAsyncClient: The constructed client. + """ + return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MetadataServiceTransport: + """Return the transport used by the client instance. + + Returns: + MetadataServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial( + type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, MetadataServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the metadata service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.MetadataServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = MetadataServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_metadata_store( + self, + request: metadata_service.CreateMetadataStoreRequest = None, + *, + parent: str = None, + metadata_store: gca_metadata_store.MetadataStore = None, + metadata_store_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Initializes a MetadataStore, including allocation of + resources. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest`): + The request object. Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + parent (:class:`str`): + Required. The resource name of the + Location where the MetadataStore should + be created. Format: + projects/{project}/locations/{location}/ + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store (:class:`google.cloud.aiplatform_v1beta1.types.MetadataStore`): + Required. The MetadataStore to + create. + + This corresponds to the ``metadata_store`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store_id (:class:`str`): + The {metadatastore} portion of the resource name with + the format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all MetadataStores in the parent Location. 
+ (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting MetadataStore.) + + This corresponds to the ``metadata_store_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_store, metadata_store_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.CreateMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if metadata_store is not None: + request.metadata_store = metadata_store + if metadata_store_id is not None: + request.metadata_store_id = metadata_store_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_metadata_store, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_metadata_store.MetadataStore, + metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_metadata_store( + self, + request: metadata_service.GetMetadataStoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_store.MetadataStore: + r"""Retrieves a specific MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest`): + The request object. Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. + name (:class:`str`): + Required. The resource name of the + MetadataStore to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataStore: + Instance of a metadata store. + Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.GetMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_metadata_store, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_metadata_stores( + self, + request: metadata_service.ListMetadataStoresRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataStoresAsyncPager: + r"""Lists MetadataStores for a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest`): + The request object. Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + parent (:class:`str`): + Required. The Location whose + MetadataStores should be listed. Format: + projects/{project}/locations/{location} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresAsyncPager: + Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.ListMetadataStoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_metadata_stores, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMetadataStoresAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_metadata_store( + self, + request: metadata_service.DeleteMetadataStoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest`): + The request object. Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + name (:class:`str`): + Required. The resource name of the + MetadataStore to delete. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.DeleteMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_metadata_store, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_artifact( + self, + request: metadata_service.CreateArtifactRequest = None, + *, + parent: str = None, + artifact: gca_artifact.Artifact = None, + artifact_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Creates an Artifact associated with a MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest`): + The request object. Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. + parent (:class:`str`): + Required. 
The resource name of the + MetadataStore where the Artifact should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact (:class:`google.cloud.aiplatform_v1beta1.types.Artifact`): + Required. The Artifact to create. + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact_id (:class:`str`): + The {artifact} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + If not provided, the Artifact's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Artifacts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Artifact.) + + This corresponds to the ``artifact_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, artifact, artifact_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = metadata_service.CreateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if artifact is not None: + request.artifact = artifact + if artifact_id is not None: + request.artifact_id = artifact_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_artifact, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_artifact( + self, + request: metadata_service.GetArtifactRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> artifact.Artifact: + r"""Retrieves a specific Artifact. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetArtifactRequest`): + The request object. Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. + name (:class:`str`): + Required. The resource name of the + Artifact to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.GetArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_artifact, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_artifacts( + self, + request: metadata_service.ListArtifactsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListArtifactsAsyncPager: + r"""Lists Artifacts in the MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest`): + The request object. Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + parent (:class:`str`): + Required. 
The MetadataStore whose + Artifacts should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsAsyncPager: + Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.ListArtifactsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_artifacts, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListArtifactsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_artifact( + self, + request: metadata_service.UpdateArtifactRequest = None, + *, + artifact: gca_artifact.Artifact = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Updates a stored Artifact. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest`): + The request object. Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. + artifact (:class:`google.cloud.aiplatform_v1beta1.types.Artifact`): + Required. The Artifact containing updates. The + Artifact's + [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.UpdateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if artifact is not None: + request.artifact = artifact + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_artifact, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("artifact.name", request.artifact.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_context( + self, + request: metadata_service.CreateContextRequest = None, + *, + parent: str = None, + context: gca_context.Context = None, + context_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Creates a Context associated with a MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateContextRequest`): + The request object. 
Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. + parent (:class:`str`): + Required. The resource name of the + MetadataStore where the Context should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context (:class:`google.cloud.aiplatform_v1beta1.types.Context`): + Required. The Context to create. + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context_id (:class:`str`): + The {context} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + If not provided, the Context's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Contexts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Context.) + + This corresponds to the ``context_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, context, context_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.CreateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if context is not None: + request.context = context + if context_id is not None: + request.context_id = context_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_context( + self, + request: metadata_service.GetContextRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> context.Context: + r"""Retrieves a specific Context. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetContextRequest`): + The request object. Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. + name (:class:`str`): + Required. The resource name of the + Context to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.GetContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_contexts( + self, + request: metadata_service.ListContextsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListContextsAsyncPager: + r"""Lists Contexts on the MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListContextsRequest`): + The request object. 
Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + parent (:class:`str`): + Required. The MetadataStore whose + Contexts should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsAsyncPager: + Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.ListContextsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_contexts, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListContextsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_context( + self, + request: metadata_service.UpdateContextRequest = None, + *, + context: gca_context.Context = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Updates a stored Context. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateContextRequest`): + The request object. Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. + context (:class:`google.cloud.aiplatform_v1beta1.types.Context`): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1beta1.Context.name] + field is used to identify the Context to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.UpdateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if context is not None: + request.context = context + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("context.name", request.context.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_context( + self, + request: metadata_service.DeleteContextRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a stored Context. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteContextRequest`): + The request object. 
+ Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. + name (:class:`str`): + Required. The resource name of the + Context to delete. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.DeleteContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def add_context_artifacts_and_executions( + self, + request: metadata_service.AddContextArtifactsAndExecutionsRequest = None, + *, + context: str = None, + artifacts: Sequence[str] = None, + executions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: + r"""Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest`): + The request object. Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + context (:class:`str`): + Required. The resource name of the + Context that the Artifacts and + Executions belong to. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ artifacts (:class:`Sequence[str]`): + The resource names of the Artifacts + to attribute to the Context. + + This corresponds to the ``artifacts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + executions (:class:`Sequence[str]`): + The resource names of the Executions + to associate with the Context. + + This corresponds to the ``executions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse: + Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, artifacts, executions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if context is not None: + request.context = context + + if artifacts: + request.artifacts.extend(artifacts) + if executions: + request.executions.extend(executions) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_context_artifacts_and_executions, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def add_context_children( + self, + request: metadata_service.AddContextChildrenRequest = None, + *, + context: str = None, + child_contexts: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextChildrenResponse: + r"""Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with INVALID_ARGUMENT error. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest`): + The request object. Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + context (:class:`str`): + Required. The resource name of the + parent Context. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + child_contexts (:class:`Sequence[str]`): + The resource names of the child + Contexts. + + This corresponds to the ``child_contexts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse: + Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, child_contexts]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.AddContextChildrenRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if context is not None: + request.context = context + + if child_contexts: + request.child_contexts.extend(child_contexts) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_context_children, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def query_context_lineage_subgraph( + self, + request: metadata_service.QueryContextLineageSubgraphRequest = None, + *, + context: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest`): + The request object. Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. + context (:class:`str`): + Required. The resource name of the Context whose + Artifacts and Executions should be retrieved as a + LineageSubgraph. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([context]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.QueryContextLineageSubgraphRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if context is not None: + request.context = context + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_context_lineage_subgraph, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_execution( + self, + request: metadata_service.CreateExecutionRequest = None, + *, + parent: str = None, + execution: gca_execution.Execution = None, + execution_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Creates an Execution associated with a MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest`): + The request object. Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. + parent (:class:`str`): + Required. The resource name of the + MetadataStore where the Execution should + be created. 
Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution (:class:`google.cloud.aiplatform_v1beta1.types.Execution`): + Required. The Execution to create. + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution_id (:class:`str`): + The {execution} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + If not provided, the Execution's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Executions in the parent + MetadataStore. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting Execution.) + + This corresponds to the ``execution_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, execution, execution_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = metadata_service.CreateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if execution is not None: + request.execution = execution + if execution_id is not None: + request.execution_id = execution_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_execution, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_execution( + self, + request: metadata_service.GetExecutionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> execution.Execution: + r"""Retrieves a specific Execution. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetExecutionRequest`): + The request object. Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. + name (:class:`str`): + Required. The resource name of the + Execution to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.GetExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_execution, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_executions( + self, + request: metadata_service.ListExecutionsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExecutionsAsyncPager: + r"""Lists Executions in the MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest`): + The request object. Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + parent (:class:`str`): + Required. 
The MetadataStore whose + Executions should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsAsyncPager: + Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.ListExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_executions, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListExecutionsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_execution( + self, + request: metadata_service.UpdateExecutionRequest = None, + *, + execution: gca_execution.Execution = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Updates a stored Execution. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest`): + The request object. Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. + execution (:class:`google.cloud.aiplatform_v1beta1.types.Execution`): + Required. The Execution containing updates. The + Execution's + [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] + field is used to identify the Execution to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.UpdateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if execution is not None: + request.execution = execution + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_execution, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution.name", request.execution.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def add_execution_events( + self, + request: metadata_service.AddExecutionEventsRequest = None, + *, + execution: str = None, + events: Sequence[event.Event] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddExecutionEventsResponse: + r"""Adds Events for denoting whether each Artifact was an + input or output for a given Execution. 
If any Events + already exist between the Execution and any of the + specified Artifacts they are simply skipped. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest`): + The request object. Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + execution (:class:`str`): + Required. The resource name of the + Execution that the Events connect + Artifacts with. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + events (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.Event]`): + The Events to create and add. + This corresponds to the ``events`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse: + Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, events]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.AddExecutionEventsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if execution is not None: + request.execution = execution + + if events: + request.events.extend(events) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_execution_events, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution", request.execution),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def query_execution_inputs_and_outputs( + self, + request: metadata_service.QueryExecutionInputsAndOutputsRequest = None, + *, + execution: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest`): + The request object. Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. + execution (:class:`str`): + Required. The resource name of the + Execution whose input and output + Artifacts should be retrieved as a + LineageSubgraph. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if execution is not None: + request.execution = execution + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_execution_inputs_and_outputs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution", request.execution),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def create_metadata_schema( + self, + request: metadata_service.CreateMetadataSchemaRequest = None, + *, + parent: str = None, + metadata_schema: gca_metadata_schema.MetadataSchema = None, + metadata_schema_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_metadata_schema.MetadataSchema: + r"""Creates an MetadataSchema. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest`): + The request object. Request message for + [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. + parent (:class:`str`): + Required. The resource name of the + MetadataStore where the MetadataSchema + should be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema (:class:`google.cloud.aiplatform_v1beta1.types.MetadataSchema`): + Required. The MetadataSchema to + create. + + This corresponds to the ``metadata_schema`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema_id (:class:`str`): + The {metadata_schema} portion of the resource name with + the format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all MetadataSchemas in the parent + Location. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting MetadataSchema.) 
+ + This corresponds to the ``metadata_schema_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.CreateMetadataSchemaRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if metadata_schema is not None: + request.metadata_schema = metadata_schema + if metadata_schema_id is not None: + request.metadata_schema_id = metadata_schema_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_metadata_schema, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_metadata_schema( + self, + request: metadata_service.GetMetadataSchemaRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_schema.MetadataSchema: + r"""Retrieves a specific MetadataSchema. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest`): + The request object. Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. + name (:class:`str`): + Required. The resource name of the + MetadataSchema to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.GetMetadataSchemaRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_metadata_schema, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_metadata_schemas( + self, + request: metadata_service.ListMetadataSchemasRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataSchemasAsyncPager: + r"""Lists MetadataSchemas. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest`): + The request object. Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + parent (:class:`str`): + Required. The MetadataStore whose + MetadataSchemas should be listed. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager: + Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.ListMetadataSchemasRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_metadata_schemas, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMetadataSchemasAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def query_artifact_lineage_subgraph( + self, + request: metadata_service.QueryArtifactLineageSubgraphRequest = None, + *, + artifact: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. 
+ + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest`): + The request object. Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. + artifact (:class:`str`): + Required. The resource name of the Artifact whose + Lineage needs to be retrieved as a LineageSubgraph. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = metadata_service.QueryArtifactLineageSubgraphRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if artifact is not None: + request.artifact = artifact + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_artifact_lineage_subgraph, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("artifact", request.artifact),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("MetadataServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py new file mode 100644 index 0000000000..dc1e9c74ba --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py @@ -0,0 +1,2762 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation 
as gca_operation +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import MetadataServiceGrpcTransport +from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport + + +class MetadataServiceClientMeta(type): + """Metaclass for the MetadataService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[MetadataServiceTransport]] + _transport_registry["grpc"] = MetadataServiceGrpcTransport + _transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[MetadataServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class MetadataServiceClient(metaclass=MetadataServiceClientMeta): + """Service for reading and writing metadata entries.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
+ Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MetadataServiceTransport: + """Return the transport used by the client instance. + + Returns: + MetadataServiceTransport: The transport used by the client instance. + """ + return self._transport + + @staticmethod + def artifact_path( + project: str, location: str, metadata_store: str, artifact: str, + ) -> str: + """Return a fully-qualified artifact string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( + project=project, + location=location, + metadata_store=metadata_store, + artifact=artifact, + ) + + @staticmethod + def parse_artifact_path(path: str) -> Dict[str, str]: + """Parse a artifact path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/artifacts/(?P<artifact>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def context_path( + project: str, location: str, metadata_store: str, context: str, + ) -> str: + """Return a fully-qualified context string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + + @staticmethod + def parse_context_path(path: str) -> Dict[str, str]: + """Parse a context path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/contexts/(?P<context>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def execution_path( + project: str, location: str, metadata_store: str, execution: str, + ) -> str: + """Return a fully-qualified execution string.""" + return
"projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( + project=project, + location=location, + metadata_store=metadata_store, + execution=execution, + ) + + @staticmethod + def parse_execution_path(path: str) -> Dict[str, str]: + """Parse a execution path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/executions/(?P<execution>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def metadata_schema_path( + project: str, location: str, metadata_store: str, metadata_schema: str, + ) -> str: + """Return a fully-qualified metadata_schema string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format( + project=project, + location=location, + metadata_store=metadata_store, + metadata_schema=metadata_schema, + ) + + @staticmethod + def parse_metadata_schema_path(path: str) -> Dict[str, str]: + """Parse a metadata_schema path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/metadataSchemas/(?P<metadata_schema>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def metadata_store_path(project: str, location: str, metadata_store: str,) -> str: + """Return a fully-qualified metadata_store string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format( + project=project, location=location, metadata_store=metadata_store, + ) + + @staticmethod + def parse_metadata_store_path(path: str) -> Dict[str, str]: + """Parse a metadata_store path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return
"billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else
{} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, MetadataServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the metadata service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, MetadataServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, MetadataServiceTransport): + # transport is a MetadataServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." 
+ ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_metadata_store( + self, + request: metadata_service.CreateMetadataStoreRequest = None, + *, + parent: str = None, + metadata_store: gca_metadata_store.MetadataStore = None, + metadata_store_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Initializes a MetadataStore, including allocation of + resources. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest): + The request object. Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + parent (str): + Required. The resource name of the + Location where the MetadataStore should + be created. Format: + projects/{project}/locations/{location}/ + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): + Required. The MetadataStore to + create. + + This corresponds to the ``metadata_store`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ metadata_store_id (str): + The {metadatastore} portion of the resource name with + the format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting MetadataStore.) + + This corresponds to the ``metadata_store_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_store, metadata_store_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, metadata_service.CreateMetadataStoreRequest): + request = metadata_service.CreateMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if metadata_store is not None: + request.metadata_store = metadata_store + if metadata_store_id is not None: + request.metadata_store_id = metadata_store_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_metadata_store.MetadataStore, + metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + def get_metadata_store( + self, + request: metadata_service.GetMetadataStoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_store.MetadataStore: + r"""Retrieves a specific MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest): + The request object. Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. + name (str): + Required. The resource name of the + MetadataStore to retrieve. 
Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataStore: + Instance of a metadata store. + Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetMetadataStoreRequest): + request = metadata_service.GetMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_metadata_stores( + self, + request: metadata_service.ListMetadataStoresRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataStoresPager: + r"""Lists MetadataStores for a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): + The request object. Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + parent (str): + Required. The Location whose + MetadataStores should be listed. Format: + projects/{project}/locations/{location} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresPager: + Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListMetadataStoresRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListMetadataStoresRequest): + request = metadata_service.ListMetadataStoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_metadata_stores] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMetadataStoresPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_metadata_store( + self, + request: metadata_service.DeleteMetadataStoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest): + The request object. Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + name (str): + Required. The resource name of the + MetadataStore to delete. 
Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteMetadataStoreRequest): + request = metadata_service.DeleteMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + def create_artifact( + self, + request: metadata_service.CreateArtifactRequest = None, + *, + parent: str = None, + artifact: gca_artifact.Artifact = None, + artifact_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Creates an Artifact associated with a MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest): + The request object. Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. + parent (str): + Required. The resource name of the + MetadataStore where the Artifact should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact to create. + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ artifact_id (str): + The {artifact} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + If not provided, the Artifact's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Artifacts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Artifact.) + + This corresponds to the ``artifact_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, artifact, artifact_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateArtifactRequest): + request = metadata_service.CreateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if parent is not None: + request.parent = parent + if artifact is not None: + request.artifact = artifact + if artifact_id is not None: + request.artifact_id = artifact_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_artifact( + self, + request: metadata_service.GetArtifactRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> artifact.Artifact: + r"""Retrieves a specific Artifact. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetArtifactRequest): + The request object. Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. + name (str): + Required. The resource name of the + Artifact to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetArtifactRequest): + request = metadata_service.GetArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_artifacts( + self, + request: metadata_service.ListArtifactsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListArtifactsPager: + r"""Lists Artifacts in the MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): + The request object. Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + parent (str): + Required. The MetadataStore whose + Artifacts should be listed. 
Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsPager: + Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListArtifactsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListArtifactsRequest): + request = metadata_service.ListArtifactsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_artifacts] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListArtifactsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_artifact( + self, + request: metadata_service.UpdateArtifactRequest = None, + *, + artifact: gca_artifact.Artifact = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Updates a stored Artifact. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest): + The request object. Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact containing updates. The + Artifact's + [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateArtifactRequest): + request = metadata_service.UpdateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if artifact is not None: + request.artifact = artifact + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("artifact.name", request.artifact.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def create_context( + self, + request: metadata_service.CreateContextRequest = None, + *, + parent: str = None, + context: gca_context.Context = None, + context_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Creates a Context associated with a MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateContextRequest): + The request object. Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. + parent (str): + Required. The resource name of the + MetadataStore where the Context should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context to create. + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context_id (str): + The {context} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + If not provided, the Context's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Contexts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Context.) + + This corresponds to the ``context_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, context, context_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateContextRequest): + request = metadata_service.CreateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if context is not None: + request.context = context + if context_id is not None: + request.context_id = context_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_context( + self, + request: metadata_service.GetContextRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> context.Context: + r"""Retrieves a specific Context. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetContextRequest): + The request object. Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. + name (str): + Required. The resource name of the + Context to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetContextRequest): + request = metadata_service.GetContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_contexts( + self, + request: metadata_service.ListContextsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListContextsPager: + r"""Lists Contexts on the MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): + The request object. Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + parent (str): + Required. The MetadataStore whose + Contexts should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsPager: + Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListContextsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListContextsRequest): + request = metadata_service.ListContextsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_contexts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListContextsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_context( + self, + request: metadata_service.UpdateContextRequest = None, + *, + context: gca_context.Context = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Updates a stored Context. 
+ + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateContextRequest): + The request object. Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1beta1.Context.name] + field is used to identify the Context to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, metadata_service.UpdateContextRequest): + request = metadata_service.UpdateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if context is not None: + request.context = context + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("context.name", request.context.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_context( + self, + request: metadata_service.DeleteContextRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a stored Context. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteContextRequest): + The request object. Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. + name (str): + Required. The resource name of the + Context to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteContextRequest): + request = metadata_service.DeleteContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def add_context_artifacts_and_executions( + self, + request: metadata_service.AddContextArtifactsAndExecutionsRequest = None, + *, + context: str = None, + artifacts: Sequence[str] = None, + executions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: + r"""Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Args: + request (google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest): + The request object. Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + context (str): + Required. The resource name of the + Context that the Artifacts and + Executions belong to. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifacts (Sequence[str]): + The resource names of the Artifacts + to attribute to the Context. + + This corresponds to the ``artifacts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + executions (Sequence[str]): + The resource names of the Executions + to associate with the Context. + + This corresponds to the ``executions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse: + Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, artifacts, executions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddContextArtifactsAndExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, metadata_service.AddContextArtifactsAndExecutionsRequest + ): + request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if context is not None: + request.context = context + if artifacts is not None: + request.artifacts = artifacts + if executions is not None: + request.executions = executions + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.add_context_artifacts_and_executions + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def add_context_children( + self, + request: metadata_service.AddContextChildrenRequest = None, + *, + context: str = None, + child_contexts: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextChildrenResponse: + r"""Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with INVALID_ARGUMENT error. + + Args: + request (google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest): + The request object. Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + context (str): + Required. The resource name of the + parent Context. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + child_contexts (Sequence[str]): + The resource names of the child + Contexts. + + This corresponds to the ``child_contexts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse: + Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, child_contexts]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddContextChildrenRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddContextChildrenRequest): + request = metadata_service.AddContextChildrenRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if context is not None: + request.context = context + if child_contexts is not None: + request.child_contexts = child_contexts + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_context_children] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def query_context_lineage_subgraph( + self, + request: metadata_service.QueryContextLineageSubgraphRequest = None, + *, + context: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. 
+ + Args: + request (google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest): + The request object. Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. + context (str): + Required. The resource name of the Context whose + Artifacts and Executions should be retrieved as a + LineageSubgraph. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryContextLineageSubgraphRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, metadata_service.QueryContextLineageSubgraphRequest): + request = metadata_service.QueryContextLineageSubgraphRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if context is not None: + request.context = context + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.query_context_lineage_subgraph + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("context", request.context),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_execution( + self, + request: metadata_service.CreateExecutionRequest = None, + *, + parent: str = None, + execution: gca_execution.Execution = None, + execution_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Creates an Execution associated with a MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest): + The request object. Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. + parent (str): + Required. The resource name of the + MetadataStore where the Execution should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution to create. 
+ This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution_id (str): + The {execution} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + If not provided, the Execution's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Executions in the parent + MetadataStore. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting Execution.) + + This corresponds to the ``execution_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, execution, execution_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, metadata_service.CreateExecutionRequest): + request = metadata_service.CreateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if execution is not None: + request.execution = execution + if execution_id is not None: + request.execution_id = execution_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_execution( + self, + request: metadata_service.GetExecutionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> execution.Execution: + r"""Retrieves a specific Execution. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetExecutionRequest): + The request object. Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. + name (str): + Required. The resource name of the + Execution to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetExecutionRequest): + request = metadata_service.GetExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_executions( + self, + request: metadata_service.ListExecutionsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExecutionsPager: + r"""Lists Executions in the MetadataStore. 
+ + Args: + request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): + The request object. Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + parent (str): + Required. The MetadataStore whose + Executions should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsPager: + Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListExecutionsRequest): + request = metadata_service.ListExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_executions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListExecutionsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_execution( + self, + request: metadata_service.UpdateExecutionRequest = None, + *, + execution: gca_execution.Execution = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Updates a stored Execution. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest): + The request object. Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution containing updates. The + Execution's + [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] + field is used to identify the Execution to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. 
A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateExecutionRequest): + request = metadata_service.UpdateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if execution is not None: + request.execution = execution + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution.name", request.execution.name),) + ), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def add_execution_events( + self, + request: metadata_service.AddExecutionEventsRequest = None, + *, + execution: str = None, + events: Sequence[event.Event] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddExecutionEventsResponse: + r"""Adds Events for denoting whether each Artifact was an + input or output for a given Execution. If any Events + already exist between the Execution and any of the + specified Artifacts they are simply skipped. + + Args: + request (google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest): + The request object. Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + execution (str): + Required. The resource name of the + Execution that the Events connect + Artifacts with. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): + The Events to create and add. + This corresponds to the ``events`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse: + Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, events]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddExecutionEventsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddExecutionEventsRequest): + request = metadata_service.AddExecutionEventsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if execution is not None: + request.execution = execution + if events is not None: + request.events = events + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_execution_events] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution", request.execution),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def query_execution_inputs_and_outputs( + self, + request: metadata_service.QueryExecutionInputsAndOutputsRequest = None, + *, + execution: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. 
+ + Args: + request (google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest): + The request object. Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. + execution (str): + Required. The resource name of the + Execution whose input and output + Artifacts should be retrieved as a + LineageSubgraph. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryExecutionInputsAndOutputsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, metadata_service.QueryExecutionInputsAndOutputsRequest + ): + request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if execution is not None: + request.execution = execution + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.query_execution_inputs_and_outputs + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("execution", request.execution),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_metadata_schema( + self, + request: metadata_service.CreateMetadataSchemaRequest = None, + *, + parent: str = None, + metadata_schema: gca_metadata_schema.MetadataSchema = None, + metadata_schema_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_metadata_schema.MetadataSchema: + r"""Creates an MetadataSchema. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest): + The request object. Request message for + [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. + parent (str): + Required. The resource name of the + MetadataStore where the MetadataSchema + should be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema): + Required. The MetadataSchema to + create. + + This corresponds to the ``metadata_schema`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ metadata_schema_id (str): + The {metadata_schema} portion of the resource name with + the format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all MetadataSchemas in the parent + Location. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting MetadataSchema.) + + This corresponds to the ``metadata_schema_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateMetadataSchemaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateMetadataSchemaRequest): + request = metadata_service.CreateMetadataSchemaRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if parent is not None: + request.parent = parent + if metadata_schema is not None: + request.metadata_schema = metadata_schema + if metadata_schema_id is not None: + request.metadata_schema_id = metadata_schema_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metadata_schema] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_metadata_schema( + self, + request: metadata_service.GetMetadataSchemaRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_schema.MetadataSchema: + r"""Retrieves a specific MetadataSchema. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest): + The request object. Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. + name (str): + Required. The resource name of the + MetadataSchema to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. 
+ """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetMetadataSchemaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetMetadataSchemaRequest): + request = metadata_service.GetMetadataSchemaRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metadata_schema] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_metadata_schemas( + self, + request: metadata_service.ListMetadataSchemasRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataSchemasPager: + r"""Lists MetadataSchemas. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): + The request object. Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. 
+ parent (str): + Required. The MetadataStore whose + MetadataSchemas should be listed. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasPager: + Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListMetadataSchemasRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListMetadataSchemasRequest): + request = metadata_service.ListMetadataSchemasRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_metadata_schemas] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMetadataSchemasPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def query_artifact_lineage_subgraph( + self, + request: metadata_service.QueryArtifactLineageSubgraphRequest = None, + *, + artifact: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + Args: + request (google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest): + The request object. Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. + artifact (str): + Required. The resource name of the Artifact whose + Lineage needs to be retrieved as a LineageSubgraph. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryArtifactLineageSubgraphRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, metadata_service.QueryArtifactLineageSubgraphRequest + ): + request = metadata_service.QueryArtifactLineageSubgraphRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if artifact is not None: + request.artifact = artifact + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.query_artifact_lineage_subgraph + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("artifact", request.artifact),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("MetadataServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py new file mode 100644 index 0000000000..979c99e4e8 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py @@ -0,0 +1,676 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store + + +class ListMetadataStoresPager: + """A pager for iterating through ``list_metadata_stores`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` object, and + provides an ``__iter__`` method to iterate through its + ``metadata_stores`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMetadataStores`` requests and continue to iterate + through the ``metadata_stores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., metadata_service.ListMetadataStoresResponse], + request: metadata_service.ListMetadataStoresRequest, + response: metadata_service.ListMetadataStoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListMetadataStoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metadata_service.ListMetadataStoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[metadata_store.MetadataStore]: + for page in self.pages: + yield from page.metadata_stores + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMetadataStoresAsyncPager: + """A pager for iterating through ``list_metadata_stores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``metadata_stores`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMetadataStores`` requests and continue to iterate + through the ``metadata_stores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metadata_service.ListMetadataStoresResponse]], + request: metadata_service.ListMetadataStoresRequest, + response: metadata_service.ListMetadataStoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListMetadataStoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[metadata_service.ListMetadataStoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[metadata_store.MetadataStore]: + async def async_generator(): + async for page in self.pages: + for response in page.metadata_stores: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListArtifactsPager: + """A pager for iterating through ``list_artifacts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``artifacts`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListArtifacts`` requests and continue to iterate + through the ``artifacts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., metadata_service.ListArtifactsResponse], + request: metadata_service.ListArtifactsRequest, + response: metadata_service.ListArtifactsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListArtifactsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metadata_service.ListArtifactsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[artifact.Artifact]: + for page in self.pages: + yield from page.artifacts + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListArtifactsAsyncPager: + """A pager for iterating through ``list_artifacts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``artifacts`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListArtifacts`` requests and continue to iterate + through the ``artifacts`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metadata_service.ListArtifactsResponse]], + request: metadata_service.ListArtifactsRequest, + response: metadata_service.ListArtifactsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListArtifactsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[metadata_service.ListArtifactsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[artifact.Artifact]: + async def async_generator(): + async for page in self.pages: + for response in page.artifacts: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListContextsPager: + """A pager for iterating through ``list_contexts`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``contexts`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListContexts`` requests and continue to iterate + through the ``contexts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., metadata_service.ListContextsResponse], + request: metadata_service.ListContextsRequest, + response: metadata_service.ListContextsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListContextsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListContextsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metadata_service.ListContextsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[context.Context]: + for page in self.pages: + yield from page.contexts + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListContextsAsyncPager: + """A pager for iterating through ``list_contexts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``contexts`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListContexts`` requests and continue to iterate + through the ``contexts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metadata_service.ListContextsResponse]], + request: metadata_service.ListContextsRequest, + response: metadata_service.ListContextsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): + The initial request object. 
+ response (google.cloud.aiplatform_v1beta1.types.ListContextsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListContextsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[metadata_service.ListContextsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[context.Context]: + async def async_generator(): + async for page in self.pages: + for response in page.contexts: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExecutionsPager: + """A pager for iterating through ``list_executions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``executions`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListExecutions`` requests and continue to iterate + through the ``executions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., metadata_service.ListExecutionsResponse], + request: metadata_service.ListExecutionsRequest, + response: metadata_service.ListExecutionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListExecutionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metadata_service.ListExecutionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[execution.Execution]: + for page in self.pages: + yield from page.executions + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExecutionsAsyncPager: + """A pager for iterating through ``list_executions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``executions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListExecutions`` requests and continue to iterate + through the ``executions`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metadata_service.ListExecutionsResponse]], + request: metadata_service.ListExecutionsRequest, + response: metadata_service.ListExecutionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListExecutionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[metadata_service.ListExecutionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[execution.Execution]: + async def async_generator(): + async for page in self.pages: + for response in page.executions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMetadataSchemasPager: + """A pager for iterating through ``list_metadata_schemas`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` object, and + provides an ``__iter__`` method to iterate through its + ``metadata_schemas`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMetadataSchemas`` requests and continue to iterate + through the ``metadata_schemas`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., metadata_service.ListMetadataSchemasResponse], + request: metadata_service.ListMetadataSchemasRequest, + response: metadata_service.ListMetadataSchemasResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListMetadataSchemasRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metadata_service.ListMetadataSchemasResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[metadata_schema.MetadataSchema]: + for page in self.pages: + yield from page.metadata_schemas + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMetadataSchemasAsyncPager: + """A pager for iterating through ``list_metadata_schemas`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``metadata_schemas`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMetadataSchemas`` requests and continue to iterate + through the ``metadata_schemas`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[metadata_service.ListMetadataSchemasResponse]], + request: metadata_service.ListMetadataSchemasRequest, + response: metadata_service.ListMetadataSchemasResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListMetadataSchemasRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[metadata_service.ListMetadataSchemasResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[metadata_schema.MetadataSchema]: + async def async_generator(): + async for page in self.pages: + for response in page.metadata_schemas: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py new file mode 100644 index 0000000000..a01e7ca986 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import MetadataServiceTransport +from .grpc import MetadataServiceGrpcTransport +from .grpc_asyncio import MetadataServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] +_transport_registry["grpc"] = MetadataServiceGrpcTransport +_transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport + +__all__ = ( + "MetadataServiceTransport", + "MetadataServiceGrpcTransport", + "MetadataServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py new file mode 100644 index 0000000000..5bf2b38261 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py @@ -0,0 +1,502 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.longrunning import operations_pb2 as operations # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class MetadataServiceTransport(abc.ABC): + """Abstract transport class for MetadataService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = 
DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=self._scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods.
+ self._wrapped_methods = { + self.create_metadata_store: gapic_v1.method.wrap_method( + self.create_metadata_store, + default_timeout=5.0, + client_info=client_info, + ), + self.get_metadata_store: gapic_v1.method.wrap_method( + self.get_metadata_store, default_timeout=5.0, client_info=client_info, + ), + self.list_metadata_stores: gapic_v1.method.wrap_method( + self.list_metadata_stores, default_timeout=5.0, client_info=client_info, + ), + self.delete_metadata_store: gapic_v1.method.wrap_method( + self.delete_metadata_store, + default_timeout=5.0, + client_info=client_info, + ), + self.create_artifact: gapic_v1.method.wrap_method( + self.create_artifact, default_timeout=5.0, client_info=client_info, + ), + self.get_artifact: gapic_v1.method.wrap_method( + self.get_artifact, default_timeout=5.0, client_info=client_info, + ), + self.list_artifacts: gapic_v1.method.wrap_method( + self.list_artifacts, default_timeout=5.0, client_info=client_info, + ), + self.update_artifact: gapic_v1.method.wrap_method( + self.update_artifact, default_timeout=5.0, client_info=client_info, + ), + self.create_context: gapic_v1.method.wrap_method( + self.create_context, default_timeout=5.0, client_info=client_info, + ), + self.get_context: gapic_v1.method.wrap_method( + self.get_context, default_timeout=5.0, client_info=client_info, + ), + self.list_contexts: gapic_v1.method.wrap_method( + self.list_contexts, default_timeout=5.0, client_info=client_info, + ), + self.update_context: gapic_v1.method.wrap_method( + self.update_context, default_timeout=5.0, client_info=client_info, + ), + self.delete_context: gapic_v1.method.wrap_method( + self.delete_context, default_timeout=5.0, client_info=client_info, + ), + self.add_context_artifacts_and_executions: gapic_v1.method.wrap_method( + self.add_context_artifacts_and_executions, + default_timeout=5.0, + client_info=client_info, + ), + self.add_context_children: gapic_v1.method.wrap_method( + self.add_context_children, default_timeout=5.0, 
client_info=client_info, + ), + self.query_context_lineage_subgraph: gapic_v1.method.wrap_method( + self.query_context_lineage_subgraph, + default_timeout=5.0, + client_info=client_info, + ), + self.create_execution: gapic_v1.method.wrap_method( + self.create_execution, default_timeout=5.0, client_info=client_info, + ), + self.get_execution: gapic_v1.method.wrap_method( + self.get_execution, default_timeout=5.0, client_info=client_info, + ), + self.list_executions: gapic_v1.method.wrap_method( + self.list_executions, default_timeout=5.0, client_info=client_info, + ), + self.update_execution: gapic_v1.method.wrap_method( + self.update_execution, default_timeout=5.0, client_info=client_info, + ), + self.add_execution_events: gapic_v1.method.wrap_method( + self.add_execution_events, default_timeout=5.0, client_info=client_info, + ), + self.query_execution_inputs_and_outputs: gapic_v1.method.wrap_method( + self.query_execution_inputs_and_outputs, + default_timeout=5.0, + client_info=client_info, + ), + self.create_metadata_schema: gapic_v1.method.wrap_method( + self.create_metadata_schema, + default_timeout=5.0, + client_info=client_info, + ), + self.get_metadata_schema: gapic_v1.method.wrap_method( + self.get_metadata_schema, default_timeout=5.0, client_info=client_info, + ), + self.list_metadata_schemas: gapic_v1.method.wrap_method( + self.list_metadata_schemas, + default_timeout=5.0, + client_info=client_info, + ), + self.query_artifact_lineage_subgraph: gapic_v1.method.wrap_method( + self.query_artifact_lineage_subgraph, + default_timeout=None, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_metadata_store( + self, + ) -> typing.Callable[ + [metadata_service.CreateMetadataStoreRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + 
raise NotImplementedError() + + @property + def get_metadata_store( + self, + ) -> typing.Callable[ + [metadata_service.GetMetadataStoreRequest], + typing.Union[ + metadata_store.MetadataStore, typing.Awaitable[metadata_store.MetadataStore] + ], + ]: + raise NotImplementedError() + + @property + def list_metadata_stores( + self, + ) -> typing.Callable[ + [metadata_service.ListMetadataStoresRequest], + typing.Union[ + metadata_service.ListMetadataStoresResponse, + typing.Awaitable[metadata_service.ListMetadataStoresResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_metadata_store( + self, + ) -> typing.Callable[ + [metadata_service.DeleteMetadataStoreRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def create_artifact( + self, + ) -> typing.Callable[ + [metadata_service.CreateArtifactRequest], + typing.Union[gca_artifact.Artifact, typing.Awaitable[gca_artifact.Artifact]], + ]: + raise NotImplementedError() + + @property + def get_artifact( + self, + ) -> typing.Callable[ + [metadata_service.GetArtifactRequest], + typing.Union[artifact.Artifact, typing.Awaitable[artifact.Artifact]], + ]: + raise NotImplementedError() + + @property + def list_artifacts( + self, + ) -> typing.Callable[ + [metadata_service.ListArtifactsRequest], + typing.Union[ + metadata_service.ListArtifactsResponse, + typing.Awaitable[metadata_service.ListArtifactsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_artifact( + self, + ) -> typing.Callable[ + [metadata_service.UpdateArtifactRequest], + typing.Union[gca_artifact.Artifact, typing.Awaitable[gca_artifact.Artifact]], + ]: + raise NotImplementedError() + + @property + def create_context( + self, + ) -> typing.Callable[ + [metadata_service.CreateContextRequest], + typing.Union[gca_context.Context, typing.Awaitable[gca_context.Context]], + ]: + raise NotImplementedError() + + @property + def 
get_context( + self, + ) -> typing.Callable[ + [metadata_service.GetContextRequest], + typing.Union[context.Context, typing.Awaitable[context.Context]], + ]: + raise NotImplementedError() + + @property + def list_contexts( + self, + ) -> typing.Callable[ + [metadata_service.ListContextsRequest], + typing.Union[ + metadata_service.ListContextsResponse, + typing.Awaitable[metadata_service.ListContextsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_context( + self, + ) -> typing.Callable[ + [metadata_service.UpdateContextRequest], + typing.Union[gca_context.Context, typing.Awaitable[gca_context.Context]], + ]: + raise NotImplementedError() + + @property + def delete_context( + self, + ) -> typing.Callable[ + [metadata_service.DeleteContextRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def add_context_artifacts_and_executions( + self, + ) -> typing.Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + typing.Union[ + metadata_service.AddContextArtifactsAndExecutionsResponse, + typing.Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def add_context_children( + self, + ) -> typing.Callable[ + [metadata_service.AddContextChildrenRequest], + typing.Union[ + metadata_service.AddContextChildrenResponse, + typing.Awaitable[metadata_service.AddContextChildrenResponse], + ], + ]: + raise NotImplementedError() + + @property + def query_context_lineage_subgraph( + self, + ) -> typing.Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + typing.Union[ + lineage_subgraph.LineageSubgraph, + typing.Awaitable[lineage_subgraph.LineageSubgraph], + ], + ]: + raise NotImplementedError() + + @property + def create_execution( + self, + ) -> typing.Callable[ + [metadata_service.CreateExecutionRequest], + typing.Union[ + gca_execution.Execution, 
typing.Awaitable[gca_execution.Execution] + ], + ]: + raise NotImplementedError() + + @property + def get_execution( + self, + ) -> typing.Callable[ + [metadata_service.GetExecutionRequest], + typing.Union[execution.Execution, typing.Awaitable[execution.Execution]], + ]: + raise NotImplementedError() + + @property + def list_executions( + self, + ) -> typing.Callable[ + [metadata_service.ListExecutionsRequest], + typing.Union[ + metadata_service.ListExecutionsResponse, + typing.Awaitable[metadata_service.ListExecutionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_execution( + self, + ) -> typing.Callable[ + [metadata_service.UpdateExecutionRequest], + typing.Union[ + gca_execution.Execution, typing.Awaitable[gca_execution.Execution] + ], + ]: + raise NotImplementedError() + + @property + def add_execution_events( + self, + ) -> typing.Callable[ + [metadata_service.AddExecutionEventsRequest], + typing.Union[ + metadata_service.AddExecutionEventsResponse, + typing.Awaitable[metadata_service.AddExecutionEventsResponse], + ], + ]: + raise NotImplementedError() + + @property + def query_execution_inputs_and_outputs( + self, + ) -> typing.Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + typing.Union[ + lineage_subgraph.LineageSubgraph, + typing.Awaitable[lineage_subgraph.LineageSubgraph], + ], + ]: + raise NotImplementedError() + + @property + def create_metadata_schema( + self, + ) -> typing.Callable[ + [metadata_service.CreateMetadataSchemaRequest], + typing.Union[ + gca_metadata_schema.MetadataSchema, + typing.Awaitable[gca_metadata_schema.MetadataSchema], + ], + ]: + raise NotImplementedError() + + @property + def get_metadata_schema( + self, + ) -> typing.Callable[ + [metadata_service.GetMetadataSchemaRequest], + typing.Union[ + metadata_schema.MetadataSchema, + typing.Awaitable[metadata_schema.MetadataSchema], + ], + ]: + raise NotImplementedError() + + @property + def list_metadata_schemas( + self, + ) -> 
typing.Callable[ + [metadata_service.ListMetadataSchemasRequest], + typing.Union[ + metadata_service.ListMetadataSchemasResponse, + typing.Awaitable[metadata_service.ListMetadataSchemasResponse], + ], + ]: + raise NotImplementedError() + + @property + def query_artifact_lineage_subgraph( + self, + ) -> typing.Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + typing.Union[ + lineage_subgraph.LineageSubgraph, + typing.Awaitable[lineage_subgraph.LineageSubgraph], + ], + ]: + raise NotImplementedError() + + +__all__ = ("MetadataServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py new file mode 100644 index 0000000000..2ae1992f1b --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py @@ -0,0 +1,993 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO + + +class MetadataServiceGrpcTransport(MetadataServiceTransport): + """gRPC backend transport for MetadataService. + + Service for reading and writing metadata entries. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_metadata_store( + self, + ) -> Callable[[metadata_service.CreateMetadataStoreRequest], operations.Operation]: + r"""Return a callable for the create metadata store method over gRPC. + + Initializes a MetadataStore, including allocation of + resources.
+ + Returns: + Callable[[~.CreateMetadataStoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_metadata_store" not in self._stubs: + self._stubs["create_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore", + request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_metadata_store"] + + @property + def get_metadata_store( + self, + ) -> Callable[ + [metadata_service.GetMetadataStoreRequest], metadata_store.MetadataStore + ]: + r"""Return a callable for the get metadata store method over gRPC. + + Retrieves a specific MetadataStore. + + Returns: + Callable[[~.GetMetadataStoreRequest], + ~.MetadataStore]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_metadata_store" not in self._stubs: + self._stubs["get_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore", + request_serializer=metadata_service.GetMetadataStoreRequest.serialize, + response_deserializer=metadata_store.MetadataStore.deserialize, + ) + return self._stubs["get_metadata_store"] + + @property + def list_metadata_stores( + self, + ) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + metadata_service.ListMetadataStoresResponse, + ]: + r"""Return a callable for the list metadata stores method over gRPC. + + Lists MetadataStores for a Location. 
+ + Returns: + Callable[[~.ListMetadataStoresRequest], + ~.ListMetadataStoresResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_metadata_stores" not in self._stubs: + self._stubs["list_metadata_stores"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores", + request_serializer=metadata_service.ListMetadataStoresRequest.serialize, + response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, + ) + return self._stubs["list_metadata_stores"] + + @property + def delete_metadata_store( + self, + ) -> Callable[[metadata_service.DeleteMetadataStoreRequest], operations.Operation]: + r"""Return a callable for the delete metadata store method over gRPC. + + Deletes a single MetadataStore. + + Returns: + Callable[[~.DeleteMetadataStoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_metadata_store" not in self._stubs: + self._stubs["delete_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/DeleteMetadataStore", + request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_metadata_store"] + + @property + def create_artifact( + self, + ) -> Callable[[metadata_service.CreateArtifactRequest], gca_artifact.Artifact]: + r"""Return a callable for the create artifact method over gRPC. + + Creates an Artifact associated with a MetadataStore. 
+ + Returns: + Callable[[~.CreateArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_artifact" not in self._stubs: + self._stubs["create_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/CreateArtifact", + request_serializer=metadata_service.CreateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs["create_artifact"] + + @property + def get_artifact( + self, + ) -> Callable[[metadata_service.GetArtifactRequest], artifact.Artifact]: + r"""Return a callable for the get artifact method over gRPC. + + Retrieves a specific Artifact. + + Returns: + Callable[[~.GetArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_artifact" not in self._stubs: + self._stubs["get_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/GetArtifact", + request_serializer=metadata_service.GetArtifactRequest.serialize, + response_deserializer=artifact.Artifact.deserialize, + ) + return self._stubs["get_artifact"] + + @property + def list_artifacts( + self, + ) -> Callable[ + [metadata_service.ListArtifactsRequest], metadata_service.ListArtifactsResponse + ]: + r"""Return a callable for the list artifacts method over gRPC. + + Lists Artifacts in the MetadataStore. + + Returns: + Callable[[~.ListArtifactsRequest], + ~.ListArtifactsResponse]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_artifacts" not in self._stubs: + self._stubs["list_artifacts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/ListArtifacts", + request_serializer=metadata_service.ListArtifactsRequest.serialize, + response_deserializer=metadata_service.ListArtifactsResponse.deserialize, + ) + return self._stubs["list_artifacts"] + + @property + def update_artifact( + self, + ) -> Callable[[metadata_service.UpdateArtifactRequest], gca_artifact.Artifact]: + r"""Return a callable for the update artifact method over gRPC. + + Updates a stored Artifact. + + Returns: + Callable[[~.UpdateArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_artifact" not in self._stubs: + self._stubs["update_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/UpdateArtifact", + request_serializer=metadata_service.UpdateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs["update_artifact"] + + @property + def create_context( + self, + ) -> Callable[[metadata_service.CreateContextRequest], gca_context.Context]: + r"""Return a callable for the create context method over gRPC. + + Creates a Context associated with a MetadataStore. + + Returns: + Callable[[~.CreateContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_context" not in self._stubs: + self._stubs["create_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/CreateContext", + request_serializer=metadata_service.CreateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs["create_context"] + + @property + def get_context( + self, + ) -> Callable[[metadata_service.GetContextRequest], context.Context]: + r"""Return a callable for the get context method over gRPC. + + Retrieves a specific Context. + + Returns: + Callable[[~.GetContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_context" not in self._stubs: + self._stubs["get_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/GetContext", + request_serializer=metadata_service.GetContextRequest.serialize, + response_deserializer=context.Context.deserialize, + ) + return self._stubs["get_context"] + + @property + def list_contexts( + self, + ) -> Callable[ + [metadata_service.ListContextsRequest], metadata_service.ListContextsResponse + ]: + r"""Return a callable for the list contexts method over gRPC. + + Lists Contexts on the MetadataStore. + + Returns: + Callable[[~.ListContextsRequest], + ~.ListContextsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_contexts" not in self._stubs: + self._stubs["list_contexts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/ListContexts", + request_serializer=metadata_service.ListContextsRequest.serialize, + response_deserializer=metadata_service.ListContextsResponse.deserialize, + ) + return self._stubs["list_contexts"] + + @property + def update_context( + self, + ) -> Callable[[metadata_service.UpdateContextRequest], gca_context.Context]: + r"""Return a callable for the update context method over gRPC. + + Updates a stored Context. + + Returns: + Callable[[~.UpdateContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_context" not in self._stubs: + self._stubs["update_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/UpdateContext", + request_serializer=metadata_service.UpdateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs["update_context"] + + @property + def delete_context( + self, + ) -> Callable[[metadata_service.DeleteContextRequest], operations.Operation]: + r"""Return a callable for the delete context method over gRPC. + + Deletes a stored Context. + + Returns: + Callable[[~.DeleteContextRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_context" not in self._stubs: + self._stubs["delete_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/DeleteContext", + request_serializer=metadata_service.DeleteContextRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_context"] + + @property + def add_context_artifacts_and_executions( + self, + ) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + metadata_service.AddContextArtifactsAndExecutionsResponse, + ]: + r"""Return a callable for the add context artifacts and + executions method over gRPC. + + Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Returns: + Callable[[~.AddContextArtifactsAndExecutionsRequest], + ~.AddContextArtifactsAndExecutionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "add_context_artifacts_and_executions" not in self._stubs: + self._stubs[ + "add_context_artifacts_and_executions" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/AddContextArtifactsAndExecutions", + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) + return self._stubs["add_context_artifacts_and_executions"] + + @property + def add_context_children( + self, + ) -> Callable[ + [metadata_service.AddContextChildrenRequest], + metadata_service.AddContextChildrenResponse, + ]: + r"""Return a callable for the add context children method over gRPC. + + Adds a set of Contexts as children to a parent Context. 
If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with INVALID_ARGUMENT error. + + Returns: + Callable[[~.AddContextChildrenRequest], + ~.AddContextChildrenResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "add_context_children" not in self._stubs: + self._stubs["add_context_children"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/AddContextChildren", + request_serializer=metadata_service.AddContextChildrenRequest.serialize, + response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, + ) + return self._stubs["add_context_children"] + + @property + def query_context_lineage_subgraph( + self, + ) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + lineage_subgraph.LineageSubgraph, + ]: + r"""Return a callable for the query context lineage subgraph method over gRPC. + + Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Returns: + Callable[[~.QueryContextLineageSubgraphRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "query_context_lineage_subgraph" not in self._stubs: + self._stubs[ + "query_context_lineage_subgraph" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/QueryContextLineageSubgraph", + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_context_lineage_subgraph"] + + @property + def create_execution( + self, + ) -> Callable[[metadata_service.CreateExecutionRequest], gca_execution.Execution]: + r"""Return a callable for the create execution method over gRPC. + + Creates an Execution associated with a MetadataStore. + + Returns: + Callable[[~.CreateExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_execution" not in self._stubs: + self._stubs["create_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/CreateExecution", + request_serializer=metadata_service.CreateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs["create_execution"] + + @property + def get_execution( + self, + ) -> Callable[[metadata_service.GetExecutionRequest], execution.Execution]: + r"""Return a callable for the get execution method over gRPC. + + Retrieves a specific Execution. + + Returns: + Callable[[~.GetExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_execution" not in self._stubs: + self._stubs["get_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/GetExecution", + request_serializer=metadata_service.GetExecutionRequest.serialize, + response_deserializer=execution.Execution.deserialize, + ) + return self._stubs["get_execution"] + + @property + def list_executions( + self, + ) -> Callable[ + [metadata_service.ListExecutionsRequest], + metadata_service.ListExecutionsResponse, + ]: + r"""Return a callable for the list executions method over gRPC. + + Lists Executions in the MetadataStore. + + Returns: + Callable[[~.ListExecutionsRequest], + ~.ListExecutionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_executions" not in self._stubs: + self._stubs["list_executions"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/ListExecutions", + request_serializer=metadata_service.ListExecutionsRequest.serialize, + response_deserializer=metadata_service.ListExecutionsResponse.deserialize, + ) + return self._stubs["list_executions"] + + @property + def update_execution( + self, + ) -> Callable[[metadata_service.UpdateExecutionRequest], gca_execution.Execution]: + r"""Return a callable for the update execution method over gRPC. + + Updates a stored Execution. + + Returns: + Callable[[~.UpdateExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_execution" not in self._stubs: + self._stubs["update_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/UpdateExecution", + request_serializer=metadata_service.UpdateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs["update_execution"] + + @property + def add_execution_events( + self, + ) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + metadata_service.AddExecutionEventsResponse, + ]: + r"""Return a callable for the add execution events method over gRPC. + + Adds Events for denoting whether each Artifact was an + input or output for a given Execution. If any Events + already exist between the Execution and any of the + specified Artifacts they are simply skipped. + + Returns: + Callable[[~.AddExecutionEventsRequest], + ~.AddExecutionEventsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "add_execution_events" not in self._stubs: + self._stubs["add_execution_events"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/AddExecutionEvents", + request_serializer=metadata_service.AddExecutionEventsRequest.serialize, + response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, + ) + return self._stubs["add_execution_events"] + + @property + def query_execution_inputs_and_outputs( + self, + ) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + lineage_subgraph.LineageSubgraph, + ]: + r"""Return a callable for the query execution inputs and + outputs method over gRPC. + + Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. 
+ + Returns: + Callable[[~.QueryExecutionInputsAndOutputsRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_execution_inputs_and_outputs" not in self._stubs: + self._stubs[ + "query_execution_inputs_and_outputs" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/QueryExecutionInputsAndOutputs", + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_execution_inputs_and_outputs"] + + @property + def create_metadata_schema( + self, + ) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + gca_metadata_schema.MetadataSchema, + ]: + r"""Return a callable for the create metadata schema method over gRPC. + + Creates a MetadataSchema. + + Returns: + Callable[[~.CreateMetadataSchemaRequest], + ~.MetadataSchema]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each.
+ if "create_metadata_schema" not in self._stubs: + self._stubs["create_metadata_schema"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataSchema", + request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, + response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs["create_metadata_schema"] + + @property + def get_metadata_schema( + self, + ) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], metadata_schema.MetadataSchema + ]: + r"""Return a callable for the get metadata schema method over gRPC. + + Retrieves a specific MetadataSchema. + + Returns: + Callable[[~.GetMetadataSchemaRequest], + ~.MetadataSchema]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_metadata_schema" not in self._stubs: + self._stubs["get_metadata_schema"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataSchema", + request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, + response_deserializer=metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs["get_metadata_schema"] + + @property + def list_metadata_schemas( + self, + ) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + metadata_service.ListMetadataSchemasResponse, + ]: + r"""Return a callable for the list metadata schemas method over gRPC. + + Lists MetadataSchemas. + + Returns: + Callable[[~.ListMetadataSchemasRequest], + ~.ListMetadataSchemasResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_metadata_schemas" not in self._stubs: + self._stubs["list_metadata_schemas"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataSchemas", + request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, + response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, + ) + return self._stubs["list_metadata_schemas"] + + @property + def query_artifact_lineage_subgraph( + self, + ) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + lineage_subgraph.LineageSubgraph, + ]: + r"""Return a callable for the query artifact lineage + subgraph method over gRPC. + + Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + Returns: + Callable[[~.QueryArtifactLineageSubgraphRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "query_artifact_lineage_subgraph" not in self._stubs: + self._stubs[ + "query_artifact_lineage_subgraph" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/QueryArtifactLineageSubgraph", + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_artifact_lineage_subgraph"] + + +__all__ = ("MetadataServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..2cd00db999 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py @@ -0,0 +1,1023 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import MetadataServiceGrpcTransport + + +class MetadataServiceGrpcAsyncIOTransport(MetadataServiceTransport): + """gRPC AsyncIO backend transport for MetadataService. + + Service for reading and writing metadata entries. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "aiplatform.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "aiplatform.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id=None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. 
+ return self._operations_client + + @property + def create_metadata_store( + self, + ) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create metadata store method over gRPC. + + Initializes a MetadataStore, including allocation of + resources. + + Returns: + Callable[[~.CreateMetadataStoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_metadata_store" not in self._stubs: + self._stubs["create_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore", + request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_metadata_store"] + + @property + def get_metadata_store( + self, + ) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + Awaitable[metadata_store.MetadataStore], + ]: + r"""Return a callable for the get metadata store method over gRPC. + + Retrieves a specific MetadataStore. + + Returns: + Callable[[~.GetMetadataStoreRequest], + Awaitable[~.MetadataStore]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_metadata_store" not in self._stubs: + self._stubs["get_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore", + request_serializer=metadata_service.GetMetadataStoreRequest.serialize, + response_deserializer=metadata_store.MetadataStore.deserialize, + ) + return self._stubs["get_metadata_store"] + + @property + def list_metadata_stores( + self, + ) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + Awaitable[metadata_service.ListMetadataStoresResponse], + ]: + r"""Return a callable for the list metadata stores method over gRPC. + + Lists MetadataStores for a Location. + + Returns: + Callable[[~.ListMetadataStoresRequest], + Awaitable[~.ListMetadataStoresResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_metadata_stores" not in self._stubs: + self._stubs["list_metadata_stores"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores", + request_serializer=metadata_service.ListMetadataStoresRequest.serialize, + response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, + ) + return self._stubs["list_metadata_stores"] + + @property + def delete_metadata_store( + self, + ) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the delete metadata store method over gRPC. + + Deletes a single MetadataStore. + + Returns: + Callable[[~.DeleteMetadataStoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_metadata_store" not in self._stubs: + self._stubs["delete_metadata_store"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/DeleteMetadataStore", + request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_metadata_store"] + + @property + def create_artifact( + self, + ) -> Callable[ + [metadata_service.CreateArtifactRequest], Awaitable[gca_artifact.Artifact] + ]: + r"""Return a callable for the create artifact method over gRPC. + + Creates an Artifact associated with a MetadataStore. + + Returns: + Callable[[~.CreateArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_artifact" not in self._stubs: + self._stubs["create_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/CreateArtifact", + request_serializer=metadata_service.CreateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs["create_artifact"] + + @property + def get_artifact( + self, + ) -> Callable[[metadata_service.GetArtifactRequest], Awaitable[artifact.Artifact]]: + r"""Return a callable for the get artifact method over gRPC. + + Retrieves a specific Artifact. + + Returns: + Callable[[~.GetArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_artifact" not in self._stubs: + self._stubs["get_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/GetArtifact", + request_serializer=metadata_service.GetArtifactRequest.serialize, + response_deserializer=artifact.Artifact.deserialize, + ) + return self._stubs["get_artifact"] + + @property + def list_artifacts( + self, + ) -> Callable[ + [metadata_service.ListArtifactsRequest], + Awaitable[metadata_service.ListArtifactsResponse], + ]: + r"""Return a callable for the list artifacts method over gRPC. + + Lists Artifacts in the MetadataStore. + + Returns: + Callable[[~.ListArtifactsRequest], + Awaitable[~.ListArtifactsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_artifacts" not in self._stubs: + self._stubs["list_artifacts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/ListArtifacts", + request_serializer=metadata_service.ListArtifactsRequest.serialize, + response_deserializer=metadata_service.ListArtifactsResponse.deserialize, + ) + return self._stubs["list_artifacts"] + + @property + def update_artifact( + self, + ) -> Callable[ + [metadata_service.UpdateArtifactRequest], Awaitable[gca_artifact.Artifact] + ]: + r"""Return a callable for the update artifact method over gRPC. + + Updates a stored Artifact. + + Returns: + Callable[[~.UpdateArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_artifact" not in self._stubs: + self._stubs["update_artifact"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/UpdateArtifact", + request_serializer=metadata_service.UpdateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs["update_artifact"] + + @property + def create_context( + self, + ) -> Callable[ + [metadata_service.CreateContextRequest], Awaitable[gca_context.Context] + ]: + r"""Return a callable for the create context method over gRPC. + + Creates a Context associated with a MetadataStore. + + Returns: + Callable[[~.CreateContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_context" not in self._stubs: + self._stubs["create_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/CreateContext", + request_serializer=metadata_service.CreateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs["create_context"] + + @property + def get_context( + self, + ) -> Callable[[metadata_service.GetContextRequest], Awaitable[context.Context]]: + r"""Return a callable for the get context method over gRPC. + + Retrieves a specific Context. + + Returns: + Callable[[~.GetContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_context" not in self._stubs: + self._stubs["get_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/GetContext", + request_serializer=metadata_service.GetContextRequest.serialize, + response_deserializer=context.Context.deserialize, + ) + return self._stubs["get_context"] + + @property + def list_contexts( + self, + ) -> Callable[ + [metadata_service.ListContextsRequest], + Awaitable[metadata_service.ListContextsResponse], + ]: + r"""Return a callable for the list contexts method over gRPC. + + Lists Contexts on the MetadataStore. + + Returns: + Callable[[~.ListContextsRequest], + Awaitable[~.ListContextsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_contexts" not in self._stubs: + self._stubs["list_contexts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/ListContexts", + request_serializer=metadata_service.ListContextsRequest.serialize, + response_deserializer=metadata_service.ListContextsResponse.deserialize, + ) + return self._stubs["list_contexts"] + + @property + def update_context( + self, + ) -> Callable[ + [metadata_service.UpdateContextRequest], Awaitable[gca_context.Context] + ]: + r"""Return a callable for the update context method over gRPC. + + Updates a stored Context. + + Returns: + Callable[[~.UpdateContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_context" not in self._stubs: + self._stubs["update_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/UpdateContext", + request_serializer=metadata_service.UpdateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs["update_context"] + + @property + def delete_context( + self, + ) -> Callable[ + [metadata_service.DeleteContextRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the delete context method over gRPC. + + Deletes a stored Context. + + Returns: + Callable[[~.DeleteContextRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_context" not in self._stubs: + self._stubs["delete_context"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/DeleteContext", + request_serializer=metadata_service.DeleteContextRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_context"] + + @property + def add_context_artifacts_and_executions( + self, + ) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse], + ]: + r"""Return a callable for the add context artifacts and + executions method over gRPC. + + Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Returns: + Callable[[~.AddContextArtifactsAndExecutionsRequest], + Awaitable[~.AddContextArtifactsAndExecutionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "add_context_artifacts_and_executions" not in self._stubs: + self._stubs[ + "add_context_artifacts_and_executions" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/AddContextArtifactsAndExecutions", + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) + return self._stubs["add_context_artifacts_and_executions"] + + @property + def add_context_children( + self, + ) -> Callable[ + [metadata_service.AddContextChildrenRequest], + Awaitable[metadata_service.AddContextChildrenResponse], + ]: + r"""Return a callable for the add context children method over gRPC. + + Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with INVALID_ARGUMENT error. + + Returns: + Callable[[~.AddContextChildrenRequest], + Awaitable[~.AddContextChildrenResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "add_context_children" not in self._stubs: + self._stubs["add_context_children"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/AddContextChildren", + request_serializer=metadata_service.AddContextChildrenRequest.serialize, + response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, + ) + return self._stubs["add_context_children"] + + @property + def query_context_lineage_subgraph( + self, + ) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + Awaitable[lineage_subgraph.LineageSubgraph], + ]: + r"""Return a callable for the query context lineage subgraph method over gRPC. + + Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Returns: + Callable[[~.QueryContextLineageSubgraphRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_context_lineage_subgraph" not in self._stubs: + self._stubs[ + "query_context_lineage_subgraph" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/QueryContextLineageSubgraph", + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_context_lineage_subgraph"] + + @property + def create_execution( + self, + ) -> Callable[ + [metadata_service.CreateExecutionRequest], Awaitable[gca_execution.Execution] + ]: + r"""Return a callable for the create execution method over gRPC. + + Creates an Execution associated with a MetadataStore. 
+ + Returns: + Callable[[~.CreateExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_execution" not in self._stubs: + self._stubs["create_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/CreateExecution", + request_serializer=metadata_service.CreateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs["create_execution"] + + @property + def get_execution( + self, + ) -> Callable[ + [metadata_service.GetExecutionRequest], Awaitable[execution.Execution] + ]: + r"""Return a callable for the get execution method over gRPC. + + Retrieves a specific Execution. + + Returns: + Callable[[~.GetExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_execution" not in self._stubs: + self._stubs["get_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/GetExecution", + request_serializer=metadata_service.GetExecutionRequest.serialize, + response_deserializer=execution.Execution.deserialize, + ) + return self._stubs["get_execution"] + + @property + def list_executions( + self, + ) -> Callable[ + [metadata_service.ListExecutionsRequest], + Awaitable[metadata_service.ListExecutionsResponse], + ]: + r"""Return a callable for the list executions method over gRPC. + + Lists Executions in the MetadataStore. 
+ + Returns: + Callable[[~.ListExecutionsRequest], + Awaitable[~.ListExecutionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_executions" not in self._stubs: + self._stubs["list_executions"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/ListExecutions", + request_serializer=metadata_service.ListExecutionsRequest.serialize, + response_deserializer=metadata_service.ListExecutionsResponse.deserialize, + ) + return self._stubs["list_executions"] + + @property + def update_execution( + self, + ) -> Callable[ + [metadata_service.UpdateExecutionRequest], Awaitable[gca_execution.Execution] + ]: + r"""Return a callable for the update execution method over gRPC. + + Updates a stored Execution. + + Returns: + Callable[[~.UpdateExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_execution" not in self._stubs: + self._stubs["update_execution"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/UpdateExecution", + request_serializer=metadata_service.UpdateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs["update_execution"] + + @property + def add_execution_events( + self, + ) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + Awaitable[metadata_service.AddExecutionEventsResponse], + ]: + r"""Return a callable for the add execution events method over gRPC. 
+ + Adds Events for denoting whether each Artifact was an + input or output for a given Execution. If any Events + already exist between the Execution and any of the + specified Artifacts they are simply skipped. + + Returns: + Callable[[~.AddExecutionEventsRequest], + Awaitable[~.AddExecutionEventsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "add_execution_events" not in self._stubs: + self._stubs["add_execution_events"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/AddExecutionEvents", + request_serializer=metadata_service.AddExecutionEventsRequest.serialize, + response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, + ) + return self._stubs["add_execution_events"] + + @property + def query_execution_inputs_and_outputs( + self, + ) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + Awaitable[lineage_subgraph.LineageSubgraph], + ]: + r"""Return a callable for the query execution inputs and + outputs method over gRPC. + + Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Returns: + Callable[[~.QueryExecutionInputsAndOutputsRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "query_execution_inputs_and_outputs" not in self._stubs: + self._stubs[ + "query_execution_inputs_and_outputs" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/QueryExecutionInputsAndOutputs", + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_execution_inputs_and_outputs"] + + @property + def create_metadata_schema( + self, + ) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + Awaitable[gca_metadata_schema.MetadataSchema], + ]: + r"""Return a callable for the create metadata schema method over gRPC. + + Creates an MetadataSchema. + + Returns: + Callable[[~.CreateMetadataSchemaRequest], + Awaitable[~.MetadataSchema]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_metadata_schema" not in self._stubs: + self._stubs["create_metadata_schema"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataSchema", + request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, + response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs["create_metadata_schema"] + + @property + def get_metadata_schema( + self, + ) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + Awaitable[metadata_schema.MetadataSchema], + ]: + r"""Return a callable for the get metadata schema method over gRPC. + + Retrieves a specific MetadataSchema. + + Returns: + Callable[[~.GetMetadataSchemaRequest], + Awaitable[~.MetadataSchema]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_metadata_schema" not in self._stubs: + self._stubs["get_metadata_schema"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataSchema", + request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, + response_deserializer=metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs["get_metadata_schema"] + + @property + def list_metadata_schemas( + self, + ) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + Awaitable[metadata_service.ListMetadataSchemasResponse], + ]: + r"""Return a callable for the list metadata schemas method over gRPC. + + Lists MetadataSchemas. + + Returns: + Callable[[~.ListMetadataSchemasRequest], + Awaitable[~.ListMetadataSchemasResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_metadata_schemas" not in self._stubs: + self._stubs["list_metadata_schemas"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataSchemas", + request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, + response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, + ) + return self._stubs["list_metadata_schemas"] + + @property + def query_artifact_lineage_subgraph( + self, + ) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + Awaitable[lineage_subgraph.LineageSubgraph], + ]: + r"""Return a callable for the query artifact lineage + subgraph method over gRPC. 
+ + Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + Returns: + Callable[[~.QueryArtifactLineageSubgraphRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_artifact_lineage_subgraph" not in self._stubs: + self._stubs[ + "query_artifact_lineage_subgraph" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.MetadataService/QueryArtifactLineageSubgraph", + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs["query_artifact_lineage_subgraph"] + + +__all__ = ("MetadataServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py index c4db3f14d7..4e53b6cb5a 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py @@ -206,7 +206,7 @@ async def search_migratable_resources( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest`): The request object. Request message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. parent (:class:`str`): Required. The location that the migratable resources should be searched from. 
It's the AI Platform location @@ -227,7 +227,7 @@ async def search_migratable_resources( Returns: google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager: Response message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. Iterating over this object will yield results and resolve additional pages automatically. @@ -296,7 +296,7 @@ async def batch_migrate_resources( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest`): The request object. Request message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. parent (:class:`str`): Required. The location of the migrated resource will live in. Format: @@ -329,7 +329,7 @@ async def batch_migrate_resources( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse` Response message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. """ # Create or coerce a protobuf request object. 
diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index 501f21183f..064fd4b341 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -180,32 +180,32 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) -> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Return a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parse a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Return a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parse a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod @@ -461,7 +461,7 @@ def search_migratable_resources( Args: request 
(google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): The request object. Request message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. parent (str): Required. The location that the migratable resources should be searched from. It's the AI Platform location @@ -482,7 +482,7 @@ def search_migratable_resources( Returns: google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesPager: Response message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. Iterating over this object will yield results and resolve additional pages automatically. @@ -554,7 +554,7 @@ def batch_migrate_resources( Args: request (google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest): The request object. Request message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. parent (str): Required. The location of the migrated resource will live in. Format: @@ -587,7 +587,7 @@ def batch_migrate_resources( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse` Response message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. """ # Create or coerce a protobuf request object. 
diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py index cbcb288489..f3324f22c6 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py @@ -71,10 +71,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -82,6 +82,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -91,20 +94,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. 
- self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py index 6789c12718..7c63224a7a 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py @@ -110,7 +110,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -118,70 +121,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -189,18 +172,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -214,7 +187,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -249,7 +222,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. 
+ """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py index 33e96e7170..100739ea7e 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py @@ -65,7 +65,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -143,10 +143,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -155,7 +155,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -163,70 +166,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -234,18 +217,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index a901ead2b1..6a5c7fb1af 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.model_service import pagers from google.cloud.aiplatform_v1beta1.types import deployed_model_ref @@ -210,7 +210,7 @@ async def upload_model( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.UploadModelRequest`): The request object. Request message for - ``ModelService.UploadModel``. + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. parent (:class:`str`): Required. The resource name of the Location into which to upload the Model. Format: @@ -238,7 +238,7 @@ async def upload_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.UploadModelResponse` Response message of - ``ModelService.UploadModel`` + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] operation. """ @@ -304,7 +304,7 @@ async def get_model( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelRequest`): The request object. Request message for - ``ModelService.GetModel``. + [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. name (:class:`str`): Required. The name of the Model resource. 
Format: ``projects/{project}/locations/{location}/models/{model}`` @@ -375,7 +375,7 @@ async def list_models( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelsRequest`): The request object. Request message for - ``ModelService.ListModels``. + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. parent (:class:`str`): Required. The resource name of the Location to list the Models from. Format: @@ -394,7 +394,7 @@ async def list_models( Returns: google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsAsyncPager: Response message for - ``ModelService.ListModels`` + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] Iterating over this object will yield results and resolve additional pages automatically. @@ -459,7 +459,7 @@ async def update_model( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateModelRequest`): The request object. Request message for - ``ModelService.UpdateModel``. + [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. model (:class:`google.cloud.aiplatform_v1beta1.types.Model`): Required. The Model which replaces the resource on the server. @@ -544,7 +544,7 @@ async def delete_model( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteModelRequest`): The request object. Request message for - ``ModelService.DeleteModel``. + [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. name (:class:`str`): Required. The name of the Model resource to be deleted. Format: @@ -643,7 +643,7 @@ async def export_model( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ExportModelRequest`): The request object. Request message for - ``ModelService.ExportModel``. + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. name (:class:`str`): Required. The resource name of the Model to export. 
Format: @@ -673,7 +673,7 @@ async def export_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ExportModelResponse` Response message of - ``ModelService.ExportModel`` + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] operation. """ @@ -739,7 +739,7 @@ async def get_model_evaluation( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest`): The request object. Request message for - ``ModelService.GetModelEvaluation``. + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. name (:class:`str`): Required. The name of the ModelEvaluation resource. Format: @@ -815,7 +815,7 @@ async def list_model_evaluations( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest`): The request object. Request message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. parent (:class:`str`): Required. The resource name of the Model to list the ModelEvaluations from. Format: @@ -834,7 +834,7 @@ async def list_model_evaluations( Returns: google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsAsyncPager: Response message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. Iterating over this object will yield results and resolve additional pages automatically. @@ -898,7 +898,7 @@ async def get_model_evaluation_slice( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest`): The request object. Request message for - ``ModelService.GetModelEvaluationSlice``. + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. name (:class:`str`): Required. The name of the ModelEvaluationSlice resource. 
Format: @@ -974,7 +974,7 @@ async def list_model_evaluation_slices( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest`): The request object. Request message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. parent (:class:`str`): Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices from. Format: @@ -993,7 +993,7 @@ async def list_model_evaluation_slices( Returns: google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager: Response message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. Iterating over this object will yield results and resolve additional pages automatically. diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py index 8b14e16e0b..f43371ac72 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.model_service import pagers from google.cloud.aiplatform_v1beta1.types import deployed_model_ref @@ -438,13 +438,13 @@ def upload_model( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Uploads a Model artifact into AI 
Platform. Args: request (google.cloud.aiplatform_v1beta1.types.UploadModelRequest): The request object. Request message for - ``ModelService.UploadModel``. + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. parent (str): Required. The resource name of the Location into which to upload the Model. Format: @@ -472,7 +472,7 @@ def upload_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.UploadModelResponse` Response message of - ``ModelService.UploadModel`` + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] operation. """ @@ -515,7 +515,7 @@ def upload_model( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, model_service.UploadModelResponse, @@ -539,7 +539,7 @@ def get_model( Args: request (google.cloud.aiplatform_v1beta1.types.GetModelRequest): The request object. Request message for - ``ModelService.GetModel``. + [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. name (str): Required. The name of the Model resource. Format: ``projects/{project}/locations/{location}/models/{model}`` @@ -611,7 +611,7 @@ def list_models( Args: request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): The request object. Request message for - ``ModelService.ListModels``. + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. parent (str): Required. The resource name of the Location to list the Models from. 
Format: @@ -630,7 +630,7 @@ def list_models( Returns: google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsPager: Response message for - ``ModelService.ListModels`` + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] Iterating over this object will yield results and resolve additional pages automatically. @@ -696,7 +696,7 @@ def update_model( Args: request (google.cloud.aiplatform_v1beta1.types.UpdateModelRequest): The request object. Request message for - ``ModelService.UpdateModel``. + [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. model (google.cloud.aiplatform_v1beta1.types.Model): Required. The Model which replaces the resource on the server. @@ -774,7 +774,7 @@ def delete_model( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a Model. Note: Model can only be deleted if there are no DeployedModels created from it. @@ -782,7 +782,7 @@ def delete_model( Args: request (google.cloud.aiplatform_v1beta1.types.DeleteModelRequest): The request object. Request message for - ``ModelService.DeleteModel``. + [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. name (str): Required. The name of the Model resource to be deleted. Format: @@ -854,7 +854,7 @@ def delete_model( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -873,7 +873,7 @@ def export_model( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Exports a trained, exportable, Model to a location specified by the user. 
A Model is considered to be exportable if it has at least one [supported export @@ -882,7 +882,7 @@ def export_model( Args: request (google.cloud.aiplatform_v1beta1.types.ExportModelRequest): The request object. Request message for - ``ModelService.ExportModel``. + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. name (str): Required. The resource name of the Model to export. Format: @@ -912,7 +912,7 @@ def export_model( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ExportModelResponse` Response message of - ``ModelService.ExportModel`` + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] operation. """ @@ -955,7 +955,7 @@ def export_model( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, model_service.ExportModelResponse, @@ -979,7 +979,7 @@ def get_model_evaluation( Args: request (google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest): The request object. Request message for - ``ModelService.GetModelEvaluation``. + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. name (str): Required. The name of the ModelEvaluation resource. Format: @@ -1056,7 +1056,7 @@ def list_model_evaluations( Args: request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): The request object. Request message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. parent (str): Required. The resource name of the Model to list the ModelEvaluations from. 
Format: @@ -1075,7 +1075,7 @@ def list_model_evaluations( Returns: google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsPager: Response message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. Iterating over this object will yield results and resolve additional pages automatically. @@ -1140,7 +1140,7 @@ def get_model_evaluation_slice( Args: request (google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest): The request object. Request message for - ``ModelService.GetModelEvaluationSlice``. + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. name (str): Required. The name of the ModelEvaluationSlice resource. Format: @@ -1219,7 +1219,7 @@ def list_model_evaluation_slices( Args: request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): The request object. Request message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. parent (str): Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices from. Format: @@ -1238,7 +1238,7 @@ def list_model_evaluation_slices( Returns: google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesPager: Response message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. Iterating over this object will yield results and resolve additional pages automatically. 
diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py index 2f87fc98dd..37d2b7a4e7 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py @@ -75,10 +75,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -86,6 +86,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -95,20 +98,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. 
- self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py index b401612b1c..2cbac70e87 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -112,7 +112,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -120,70 +123,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -191,18 +174,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -216,7 +189,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -251,7 +224,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. 
+ """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py index d05bebeeec..700014be02 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -67,7 +67,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -145,10 +145,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -157,7 +157,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -165,70 +168,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -236,18 +219,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index 063153700c..b09fbe5746 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -28,12 +28,14 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import pipeline_state from google.cloud.aiplatform_v1beta1.types import training_pipeline @@ -58,10 +60,24 @@ class PipelineServiceAsyncClient: DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT + artifact_path = staticmethod(PipelineServiceClient.artifact_path) + parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) + context_path = staticmethod(PipelineServiceClient.context_path) + parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) + custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) + 
parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) + execution_path = staticmethod(PipelineServiceClient.execution_path) + parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) model_path = staticmethod(PipelineServiceClient.model_path) parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) + network_path = staticmethod(PipelineServiceClient.network_path) + parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) + pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) + parse_pipeline_job_path = staticmethod( + PipelineServiceClient.parse_pipeline_job_path + ) training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) parse_training_pipeline_path = staticmethod( PipelineServiceClient.parse_training_pipeline_path @@ -205,7 +221,7 @@ async def create_training_pipeline( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest`): The request object. Request message for - ``PipelineService.CreateTrainingPipeline``. + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. parent (:class:`str`): Required. The resource name of the Location to create the TrainingPipeline in. Format: @@ -234,7 +250,7 @@ async def create_training_pipeline( always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - ``upload`` + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] the Model to AI Platform, and evaluate the Model. """ @@ -292,7 +308,7 @@ async def get_training_pipeline( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest`): The request object. 
Request message for - ``PipelineService.GetTrainingPipeline``. + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. name (:class:`str`): Required. The name of the TrainingPipeline resource. Format: @@ -314,7 +330,7 @@ async def get_training_pipeline( always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - ``upload`` + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] the Model to AI Platform, and evaluate the Model. """ @@ -370,7 +386,7 @@ async def list_training_pipelines( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest`): The request object. Request message for - ``PipelineService.ListTrainingPipelines``. + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. parent (:class:`str`): Required. The resource name of the Location to list the TrainingPipelines from. Format: @@ -389,7 +405,7 @@ async def list_training_pipelines( Returns: google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: Response message for - ``PipelineService.ListTrainingPipelines`` + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] Iterating over this object will yield results and resolve additional pages automatically. @@ -453,7 +469,7 @@ async def delete_training_pipeline( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest`): The request object. Request message for - ``PipelineService.DeleteTrainingPipeline``. + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. name (:class:`str`): Required. The name of the TrainingPipeline resource to be deleted. 
Format: @@ -546,21 +562,21 @@ async def cancel_training_pipeline( r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use - ``PipelineService.GetTrainingPipeline`` + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a - ``TrainingPipeline.error`` - value with a ``google.rpc.Status.code`` of + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``TrainingPipeline.state`` + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] is set to ``CANCELLED``. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest`): The request object. Request message for - ``PipelineService.CancelTrainingPipeline``. + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. name (:class:`str`): Required. The name of the TrainingPipeline to cancel. Format: @@ -613,6 +629,432 @@ async def cancel_training_pipeline( request, retry=retry, timeout=timeout, metadata=metadata, ) + async def create_pipeline_job( + self, + request: pipeline_service.CreatePipelineJobRequest = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. 
+ + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest`): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (:class:`google.cloud.aiplatform_v1beta1.types.PipelineJob`): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (:class:`str`): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.CreatePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_pipeline_job( + self, + request: pipeline_service.GetPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest`): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource. 
Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.GetPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def list_pipeline_jobs( + self, + request: pipeline_service.ListPipelineJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsAsyncPager: + r"""Lists PipelineJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest`): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = pipeline_service.ListPipelineJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_pipeline_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListPipelineJobsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_pipeline_job( + self, + request: pipeline_service.DeletePipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a PipelineJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest`): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.DeletePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_pipeline_job( + self, + request: pipeline_service.CancelPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest`): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.CancelPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py index 4efc2064b5..b9d6019ce7 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -32,12 +32,14 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import pipeline_state from google.cloud.aiplatform_v1beta1.types import training_pipeline @@ -169,6 +171,64 @@ def transport(self) -> PipelineServiceTransport: """ return self._transport + @staticmethod + def artifact_path( + project: str, location: str, metadata_store: str, artifact: str, + ) -> str: + """Return a fully-qualified artifact string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( + project=project, + location=location, + metadata_store=metadata_store, + artifact=artifact, + ) + + @staticmethod + def parse_artifact_path(path: str) -> Dict[str, str]: + 
"""Parse a artifact path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def context_path( + project: str, location: str, metadata_store: str, context: str, + ) -> str: + """Return a fully-qualified context string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + + @staticmethod + def parse_context_path(path: str) -> Dict[str, str]: + """Parse a context path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def custom_job_path(project: str, location: str, custom_job: str,) -> str: + """Return a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format( + project=project, location=location, custom_job=custom_job, + ) + + @staticmethod + def parse_custom_job_path(path: str) -> Dict[str, str]: + """Parse a custom_job path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: """Return a fully-qualified endpoint string.""" @@ -185,6 +245,27 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def execution_path( + project: str, location: str, metadata_store: str, execution: str, + ) -> str: + """Return a fully-qualified execution string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( + project=project, + location=location, + 
metadata_store=metadata_store, + execution=execution, + ) + + @staticmethod + def parse_execution_path(path: str) -> Dict[str, str]: + """Parse a execution path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/executions/(?P<execution>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def model_path(project: str, location: str, model: str,) -> str: """Return a fully-qualified model string.""" @@ -201,6 +282,37 @@ def parse_model_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_path(project: str, network: str,) -> str: + """Return a fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str, str]: + """Parse a network path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path + ) + return m.groupdict() if m else {} + + @staticmethod + def pipeline_job_path(project: str, location: str, pipeline_job: str,) -> str: + """Return a fully-qualified pipeline_job string.""" + return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format( + project=project, location=location, pipeline_job=pipeline_job, + ) + + @staticmethod + def parse_pipeline_job_path(path: str) -> Dict[str, str]: + """Parse a pipeline_job path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/pipelineJobs/(?P<pipeline_job>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def training_pipeline_path( project: str, location: str, training_pipeline: str, @@ -407,7 +519,7 @@ def create_training_pipeline( Args: request (google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest): The request object. Request message for - ``PipelineService.CreateTrainingPipeline``. 
+ [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. parent (str): Required. The resource name of the Location to create the TrainingPipeline in. Format: @@ -436,7 +548,7 @@ def create_training_pipeline( always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - ``upload`` + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] the Model to AI Platform, and evaluate the Model. """ @@ -495,7 +607,7 @@ def get_training_pipeline( Args: request (google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest): The request object. Request message for - ``PipelineService.GetTrainingPipeline``. + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. name (str): Required. The name of the TrainingPipeline resource. Format: @@ -517,7 +629,7 @@ def get_training_pipeline( always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - ``upload`` + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] the Model to AI Platform, and evaluate the Model. """ @@ -574,7 +686,7 @@ def list_training_pipelines( Args: request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest): The request object. Request message for - ``PipelineService.ListTrainingPipelines``. + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. parent (str): Required. The resource name of the Location to list the TrainingPipelines from. 
Format: @@ -593,7 +705,7 @@ def list_training_pipelines( Returns: google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesPager: Response message for - ``PipelineService.ListTrainingPipelines`` + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] Iterating over this object will yield results and resolve additional pages automatically. @@ -652,13 +764,13 @@ def delete_training_pipeline( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a TrainingPipeline. Args: request (google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest): The request object. Request message for - ``PipelineService.DeleteTrainingPipeline``. + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. name (str): Required. The name of the TrainingPipeline resource to be deleted. Format: @@ -730,7 +842,7 @@ def delete_training_pipeline( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -752,21 +864,21 @@ def cancel_training_pipeline( r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use - ``PipelineService.GetTrainingPipeline`` + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. 
On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a - ``TrainingPipeline.error`` - value with a ``google.rpc.Status.code`` of + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``TrainingPipeline.state`` + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] is set to ``CANCELLED``. Args: request (google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest): The request object. Request message for - ``PipelineService.CancelTrainingPipeline``. + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. name (str): Required. The name of the TrainingPipeline to cancel. Format: @@ -820,6 +932,437 @@ def cancel_training_pipeline( request, retry=retry, timeout=timeout, metadata=metadata, ) + def create_pipeline_job( + self, + request: pipeline_service.CreatePipelineJobRequest = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. + parent (str): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreatePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CreatePipelineJobRequest): + request = pipeline_service.CreatePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_pipeline_job( + self, + request: pipeline_service.GetPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetPipelineJobRequest): + request = pipeline_service.GetPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_pipeline_jobs( + self, + request: pipeline_service.ListPipelineJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsPager: + r"""Lists PipelineJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListPipelineJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListPipelineJobsRequest): + request = pipeline_service.ListPipelineJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPipelineJobsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_pipeline_job( + self, + request: pipeline_service.DeletePipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a PipelineJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. + name (str): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.DeletePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.DeletePipelineJobRequest): + request = pipeline_service.DeletePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def cancel_pipeline_job( + self, + request: pipeline_service.CancelPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelPipelineJobRequest): + request = pipeline_service.CancelPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py index db2b4dd3a1..0a4aa3bbc5 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py @@ -26,6 +26,7 @@ Optional, ) +from google.cloud.aiplatform_v1beta1.types import pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import training_pipeline @@ -160,3 +161,131 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListPipelineJobsPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., pipeline_service.ListPipelineJobsResponse], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[pipeline_job.PipelineJob]: + for page in self.pages: + yield from page.pipeline_jobs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListPipelineJobsAsyncPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[pipeline_job.PipelineJob]: + async def async_generator(): + async for page in self.pages: + for response in page.pipeline_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py index 41123b8615..70ad468804 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py @@ -26,6 +26,8 @@ from 
google.api_core import operations_v1 # type: ignore from google.auth import credentials # type: ignore +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import training_pipeline from google.cloud.aiplatform_v1beta1.types import ( @@ -76,10 +78,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -87,6 +89,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -96,20 +101,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. 
self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -138,6 +140,21 @@ def _prep_wrapped_messages(self, client_info): default_timeout=5.0, client_info=client_info, ), + self.create_pipeline_job: gapic_v1.method.wrap_method( + self.create_pipeline_job, default_timeout=None, client_info=client_info, + ), + self.get_pipeline_job: gapic_v1.method.wrap_method( + self.get_pipeline_job, default_timeout=None, client_info=client_info, + ), + self.list_pipeline_jobs: gapic_v1.method.wrap_method( + self.list_pipeline_jobs, default_timeout=None, client_info=client_info, + ), + self.delete_pipeline_job: gapic_v1.method.wrap_method( + self.delete_pipeline_job, default_timeout=None, client_info=client_info, + ), + self.cancel_pipeline_job: gapic_v1.method.wrap_method( + self.cancel_pipeline_job, default_timeout=None, client_info=client_info, + ), } @property @@ -199,5 +216,57 @@ def cancel_training_pipeline( ]: raise NotImplementedError() + @property + def create_pipeline_job( + self, + ) -> typing.Callable[ + [pipeline_service.CreatePipelineJobRequest], + typing.Union[ + gca_pipeline_job.PipelineJob, typing.Awaitable[gca_pipeline_job.PipelineJob] + ], + ]: + raise NotImplementedError() + + @property + def get_pipeline_job( + self, + ) -> typing.Callable[ + [pipeline_service.GetPipelineJobRequest], + typing.Union[ + pipeline_job.PipelineJob, typing.Awaitable[pipeline_job.PipelineJob] + ], + ]: + raise NotImplementedError() + + @property + def list_pipeline_jobs( + self, + ) -> typing.Callable[ + [pipeline_service.ListPipelineJobsRequest], + typing.Union[ + pipeline_service.ListPipelineJobsResponse, + typing.Awaitable[pipeline_service.ListPipelineJobsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_pipeline_job( + self, + ) -> typing.Callable[ + 
[pipeline_service.DeletePipelineJobRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_pipeline_job( + self, + ) -> typing.Callable[ + [pipeline_service.CancelPipelineJobRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + __all__ = ("PipelineServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py index 83383d9e87..d05a753e82 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -27,6 +27,8 @@ import grpc # type: ignore +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import training_pipeline from google.cloud.aiplatform_v1beta1.types import ( @@ -113,7 +115,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -121,70 +126,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -192,18 +177,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -217,7 +192,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -252,7 +227,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property @@ -394,15 +370,15 @@ def cancel_training_pipeline( Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use - ``PipelineService.GetTrainingPipeline`` + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a - ``TrainingPipeline.error`` - value with a ``google.rpc.Status.code`` of + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``TrainingPipeline.state`` + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] is set to ``CANCELLED``. 
Returns: @@ -423,5 +399,153 @@ def cancel_training_pipeline( ) return self._stubs["cancel_training_pipeline"] + @property + def create_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], gca_pipeline_job.PipelineJob + ]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_pipeline_job" not in self._stubs: + self._stubs["create_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PipelineService/CreatePipelineJob", + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["create_pipeline_job"] + + @property + def get_pipeline_job( + self, + ) -> Callable[[pipeline_service.GetPipelineJobRequest], pipeline_job.PipelineJob]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_pipeline_job" not in self._stubs: + self._stubs["get_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PipelineService/GetPipelineJob", + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["get_pipeline_job"] + + @property + def list_pipeline_jobs( + self, + ) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + pipeline_service.ListPipelineJobsResponse, + ]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + ~.ListPipelineJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_pipeline_jobs" not in self._stubs: + self._stubs["list_pipeline_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PipelineService/ListPipelineJobs", + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs["list_pipeline_jobs"] + + @property + def delete_pipeline_job( + self, + ) -> Callable[[pipeline_service.DeletePipelineJobRequest], operations.Operation]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_pipeline_job" not in self._stubs: + self._stubs["delete_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PipelineService/DeletePipelineJob", + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_pipeline_job"] + + @property + def cancel_pipeline_job( + self, + ) -> Callable[[pipeline_service.CancelPipelineJobRequest], empty.Empty]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_pipeline_job" not in self._stubs: + self._stubs["cancel_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PipelineService/CancelPipelineJob", + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["cancel_pipeline_job"] + __all__ = ("PipelineServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py index 76f21faf50..6c74b1d05a 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py @@ -28,6 +28,8 @@ import grpc # type: ignore from grpc.experimental import aio # type: ignore +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import training_pipeline from google.cloud.aiplatform_v1beta1.types import ( @@ -68,7 +70,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -146,10 +148,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -158,7 +160,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -166,70 +171,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -237,18 +222,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: @@ -404,15 +379,15 @@ def cancel_training_pipeline( Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use - ``PipelineService.GetTrainingPipeline`` + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a - ``TrainingPipeline.error`` - value with a ``google.rpc.Status.code`` of + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and - ``TrainingPipeline.state`` + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] is set to ``CANCELLED``. 
Returns: @@ -433,5 +408,158 @@ def cancel_training_pipeline( ) return self._stubs["cancel_training_pipeline"] + @property + def create_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Awaitable[gca_pipeline_job.PipelineJob], + ]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_pipeline_job" not in self._stubs: + self._stubs["create_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PipelineService/CreatePipelineJob", + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["create_pipeline_job"] + + @property + def get_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.GetPipelineJobRequest], Awaitable[pipeline_job.PipelineJob] + ]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_pipeline_job" not in self._stubs: + self._stubs["get_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PipelineService/GetPipelineJob", + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["get_pipeline_job"] + + @property + def list_pipeline_jobs( + self, + ) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Awaitable[pipeline_service.ListPipelineJobsResponse], + ]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + Awaitable[~.ListPipelineJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_pipeline_jobs" not in self._stubs: + self._stubs["list_pipeline_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PipelineService/ListPipelineJobs", + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs["list_pipeline_jobs"] + + @property + def delete_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_pipeline_job" not in self._stubs: + self._stubs["delete_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PipelineService/DeletePipelineJob", + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_pipeline_job"] + + @property + def cancel_pipeline_job( + self, + ) -> Callable[[pipeline_service.CancelPipelineJobRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_pipeline_job" not in self._stubs: + self._stubs["cancel_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PipelineService/CancelPipelineJob", + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["cancel_pipeline_job"] + __all__ = ("PipelineServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index 4d69a6635f..2d651938f6 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -186,7 +186,7 @@ async def predict( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.PredictRequest`): The request object. Request message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. endpoint (:class:`str`): Required. The name of the Endpoint requested to serve the prediction. Format: @@ -206,7 +206,7 @@ async def predict( Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this @@ -217,7 +217,7 @@ async def predict( DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. 
This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this @@ -232,7 +232,7 @@ async def predict( Returns: google.cloud.aiplatform_v1beta1.types.PredictResponse: Response message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. """ # Create or coerce a protobuf request object. @@ -293,20 +293,20 @@ async def explain( r"""Perform an online explanation. If - ``deployed_model_id`` + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified, the corresponding DeployModel must have - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] populated. If - ``deployed_model_id`` + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is not specified, all DeployedModels must have - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] populated. Only deployed AutoML tabular Models have explanation_spec. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ExplainRequest`): The request object. Request message for - ``PredictionService.Explain``. + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. endpoint (:class:`str`): Required. The name of the Endpoint requested to serve the explanation. Format: @@ -326,7 +326,7 @@ async def explain( specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. 
This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this @@ -337,7 +337,7 @@ async def explain( DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this @@ -345,7 +345,7 @@ async def explain( deployed_model_id (:class:`str`): If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding - ``Endpoint.traffic_split``. + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -360,7 +360,7 @@ async def explain( Returns: google.cloud.aiplatform_v1beta1.types.ExplainResponse: Response message for - ``PredictionService.Explain``. + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index 042307eca1..72f8c1541d 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -362,7 +362,7 @@ def predict( Args: request (google.cloud.aiplatform_v1beta1.types.PredictRequest): The request object. Request message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. endpoint (str): Required. The name of the Endpoint requested to serve the prediction. 
Format: @@ -382,7 +382,7 @@ def predict( Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this @@ -393,7 +393,7 @@ def predict( DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this @@ -408,7 +408,7 @@ def predict( Returns: google.cloud.aiplatform_v1beta1.types.PredictResponse: Response message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. """ # Create or coerce a protobuf request object. @@ -469,20 +469,20 @@ def explain( r"""Perform an online explanation. If - ``deployed_model_id`` + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified, the corresponding DeployModel must have - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] populated. If - ``deployed_model_id`` + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is not specified, all DeployedModels must have - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] populated. Only deployed AutoML tabular Models have explanation_spec. Args: request (google.cloud.aiplatform_v1beta1.types.ExplainRequest): The request object. Request message for - ``PredictionService.Explain``. 
+ [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. endpoint (str): Required. The name of the Endpoint requested to serve the explanation. Format: @@ -502,7 +502,7 @@ def explain( specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this @@ -513,7 +513,7 @@ def explain( DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this @@ -521,7 +521,7 @@ def explain( deployed_model_id (str): If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding - ``Endpoint.traffic_split``. + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -536,7 +536,7 @@ def explain( Returns: google.cloud.aiplatform_v1beta1.types.ExplainResponse: Response message for - ``PredictionService.Explain``. + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. """ # Create or coerce a protobuf request object. 
diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py index 0c82f7d83c..df601f6bdd 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -69,10 +69,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -80,6 +80,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -89,20 +92,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. 
- self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index f3b9be0c3d..cd3390b5b9 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -106,7 +106,9 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -114,70 +116,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -185,17 +167,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -209,7 +182,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -244,7 +217,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property @@ -286,13 +260,13 @@ def explain( Perform an online explanation. If - ``deployed_model_id`` + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified, the corresponding DeployModel must have - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] populated. 
If - ``deployed_model_id`` + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is not specified, all DeployedModels must have - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] populated. Only deployed AutoML tabular Models have explanation_spec. diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py index e1493acc9c..a918f991f5 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -61,7 +61,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -139,10 +139,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -151,7 +151,9 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -159,70 +161,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -230,17 +212,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: @@ -293,13 +266,13 @@ def explain( Perform an online explanation. If - ``deployed_model_id`` + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified, the corresponding DeployModel must have - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] populated. If - ``deployed_model_id`` + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is not specified, all DeployedModels must have - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] populated. Only deployed AutoML tabular Models have explanation_spec. diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py index 6907135b53..c87486e729 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -28,7 +28,7 @@ from google.auth import credentials # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers from google.cloud.aiplatform_v1beta1.types import operation as gca_operation @@ -204,7 +204,7 @@ async def create_specialist_pool( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest`): The request object. Request message for - ``SpecialistPoolService.CreateSpecialistPool``. 
+ [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. parent (:class:`str`): Required. The parent Project name for the new SpecialistPool. The form is @@ -303,7 +303,7 @@ async def get_specialist_pool( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest`): The request object. Request message for - ``SpecialistPoolService.GetSpecialistPool``. + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. name (:class:`str`): Required. The name of the SpecialistPool resource. The form is @@ -386,7 +386,7 @@ async def list_specialist_pools( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest`): The request object. Request message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. parent (:class:`str`): Required. The name of the SpecialistPool's parent resource. Format: @@ -405,7 +405,7 @@ async def list_specialist_pools( Returns: google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager: Response message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. Iterating over this object will yield results and resolve additional pages automatically. @@ -470,7 +470,7 @@ async def delete_specialist_pool( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest`): The request object. Request message for - ``SpecialistPoolService.DeleteSpecialistPool``. + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. name (:class:`str`): Required. The resource name of the SpecialistPool to delete. 
Format: @@ -566,7 +566,7 @@ async def update_specialist_pool( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest`): The request object. Request message for - ``SpecialistPoolService.UpdateSpecialistPool``. + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. specialist_pool (:class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool`): Required. The SpecialistPool which replaces the resource on the server. diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py index cde21b3720..c3f95f54ae 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py @@ -32,7 +32,7 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -from google.api_core import operation as ga_operation # type: ignore +from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers from google.cloud.aiplatform_v1beta1.types import operation as gca_operation @@ -367,13 +367,13 @@ def create_specialist_pool( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Creates a SpecialistPool. Args: request (google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest): The request object. Request message for - ``SpecialistPoolService.CreateSpecialistPool``. + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. parent (str): Required. 
The parent Project name for the new SpecialistPool. The form is @@ -449,7 +449,7 @@ def create_specialist_pool( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, gca_specialist_pool.SpecialistPool, @@ -473,7 +473,7 @@ def get_specialist_pool( Args: request (google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest): The request object. Request message for - ``SpecialistPoolService.GetSpecialistPool``. + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. name (str): Required. The name of the SpecialistPool resource. The form is @@ -557,7 +557,7 @@ def list_specialist_pools( Args: request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): The request object. Request message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. parent (str): Required. The name of the SpecialistPool's parent resource. Format: @@ -576,7 +576,7 @@ def list_specialist_pools( Returns: google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager: Response message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. Iterating over this object will yield results and resolve additional pages automatically. @@ -635,14 +635,14 @@ def delete_specialist_pool( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Deletes a SpecialistPool as well as all Specialists in the pool. 
Args: request (google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest): The request object. Request message for - ``SpecialistPoolService.DeleteSpecialistPool``. + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. name (str): Required. The resource name of the SpecialistPool to delete. Format: @@ -714,7 +714,7 @@ def delete_specialist_pool( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. - response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, empty.Empty, @@ -733,13 +733,13 @@ def update_specialist_pool( retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> ga_operation.Operation: + ) -> gac_operation.Operation: r"""Updates a SpecialistPool. Args: request (google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest): The request object. Request message for - ``SpecialistPoolService.UpdateSpecialistPool``. + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): Required. The SpecialistPool which replaces the resource on the server. @@ -816,7 +816,7 @@ def update_specialist_pool( response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. 
- response = ga_operation.from_gapic( + response = gac_operation.from_gapic( response, self._transport.operations_client, gca_specialist_pool.SpecialistPool, diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py index f1af058030..48ee079a5c 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py @@ -72,10 +72,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -83,6 +83,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -92,20 +95,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py index dbc31f0c7e..c1f9300de8 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py @@ -114,7 +114,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -122,70 +125,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -193,18 +176,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -218,7 +191,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -253,7 +226,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py index a71d380b5b..592776b792 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -69,7 +69,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -147,10 +147,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -159,7 +159,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -167,70 +170,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -238,18 +221,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py new file mode 100644 index 0000000000..70277571f7 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .client import TensorboardServiceClient +from .async_client import TensorboardServiceAsyncClient + +__all__ = ( + "TensorboardServiceClient", + "TensorboardServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py new file mode 100644 index 0000000000..9370a0ada6 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -0,0 +1,2346 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_experiment as gca_tensorboard_experiment, +) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_time_series as gca_tensorboard_time_series, +) +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from 
.transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport +from .client import TensorboardServiceClient + + +class TensorboardServiceAsyncClient: + """TensorboardService""" + + _client: TensorboardServiceClient + + DEFAULT_ENDPOINT = TensorboardServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = TensorboardServiceClient.DEFAULT_MTLS_ENDPOINT + + tensorboard_path = staticmethod(TensorboardServiceClient.tensorboard_path) + parse_tensorboard_path = staticmethod( + TensorboardServiceClient.parse_tensorboard_path + ) + tensorboard_experiment_path = staticmethod( + TensorboardServiceClient.tensorboard_experiment_path + ) + parse_tensorboard_experiment_path = staticmethod( + TensorboardServiceClient.parse_tensorboard_experiment_path + ) + tensorboard_run_path = staticmethod(TensorboardServiceClient.tensorboard_run_path) + parse_tensorboard_run_path = staticmethod( + TensorboardServiceClient.parse_tensorboard_run_path + ) + tensorboard_time_series_path = staticmethod( + TensorboardServiceClient.tensorboard_time_series_path + ) + parse_tensorboard_time_series_path = staticmethod( + TensorboardServiceClient.parse_tensorboard_time_series_path + ) + + common_billing_account_path = staticmethod( + TensorboardServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + TensorboardServiceClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(TensorboardServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + TensorboardServiceClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + TensorboardServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + TensorboardServiceClient.parse_common_organization_path + ) + + common_project_path = staticmethod(TensorboardServiceClient.common_project_path) + parse_common_project_path = staticmethod( + TensorboardServiceClient.parse_common_project_path + ) + + 
common_location_path = staticmethod(TensorboardServiceClient.common_location_path) + parse_common_location_path = staticmethod( + TensorboardServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceAsyncClient: The constructed client. + """ + return TensorboardServiceClient.from_service_account_info.__func__(TensorboardServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceAsyncClient: The constructed client. + """ + return TensorboardServiceClient.from_service_account_file.__func__(TensorboardServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TensorboardServiceTransport: + """Return the transport used by the client instance. + + Returns: + TensorboardServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial( + type(TensorboardServiceClient).get_transport_class, + type(TensorboardServiceClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, TensorboardServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the tensorboard service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.TensorboardServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = TensorboardServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_tensorboard( + self, + request: tensorboard_service.CreateTensorboardRequest = None, + *, + parent: str = None, + tensorboard: gca_tensorboard.Tensorboard = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a Tensorboard. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest`): + The request object. Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Tensorboard in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard (:class:`google.cloud.aiplatform_v1beta1.types.Tensorboard`): + Required. The Tensorboard to create. + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users’ training metrics. 
+ A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.CreateTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard( + self, + request: tensorboard_service.GetTensorboardRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard.Tensorboard: + r"""Gets a Tensorboard. 
+ + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest`): + The request object. Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. + name (:class:`str`): + Required. The name of the Tensorboard resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Tensorboard: + Tensorboard is a physical database + that stores users’ training metrics. A + default Tensorboard is provided in each + region of a GCP project. If needed users + can also create extra Tensorboards in + their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.GetTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_tensorboard( + self, + request: tensorboard_service.UpdateTensorboardRequest = None, + *, + tensorboard: gca_tensorboard.Tensorboard = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a Tensorboard. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest`): + The request object. Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. + tensorboard (:class:`google.cloud.aiplatform_v1beta1.types.Tensorboard`): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if + new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users’ training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.UpdateTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard is not None: + request.tensorboard = tensorboard + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard.name", request.tensorboard.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_tensorboards( + self, + request: tensorboard_service.ListTensorboardsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardsAsyncPager: + r"""Lists Tensorboards in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest`): + The request object. Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + parent (:class:`str`): + Required. The resource name of the + Location to list Tensorboards. Format: + 'projects/{project}/locations/{location}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager: + Response message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.ListTensorboardsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboards, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_tensorboard( + self, + request: tensorboard_service.DeleteTensorboardRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Tensorboard. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest`): + The request object. Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. + name (:class:`str`): + Required. The name of the Tensorboard to be deleted. 
+ Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.DeleteTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_tensorboard_experiment( + self, + request: tensorboard_service.CreateTensorboardExperimentRequest = None, + *, + parent: str = None, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + tensorboard_experiment_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Creates a TensorboardExperiment. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest`): + The request object. Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. + parent (:class:`str`): + Required. The resource name of the Tensorboard to create + the TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardExperiment`): + The TensorboardExperiment to create. + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment_id (:class:`str`): + Required. 
The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_experiment_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, tensorboard_experiment, tensorboard_experiment_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.CreateTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if tensorboard_experiment_id is not None: + request.tensorboard_experiment_id = tensorboard_experiment_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_tensorboard_experiment( + self, + request: tensorboard_service.GetTensorboardExperimentRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_experiment.TensorboardExperiment: + r"""Gets a TensorboardExperiment. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest`): + The request object. Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. + name (:class:`str`): + Required. The name of the TensorboardExperiment + resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.GetTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_tensorboard_experiment( + self, + request: tensorboard_service.UpdateTensorboardExperimentRequest = None, + *, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Updates a TensorboardExperiment. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest`): + The request object. Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. 
+ tensorboard_experiment (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardExperiment`): + Required. The TensorboardExperiment's ``name`` field is + used to identify the TensorboardExperiment to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = tensorboard_service.UpdateTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_experiment.name", request.tensorboard_experiment.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_tensorboard_experiments( + self, + request: tensorboard_service.ListTensorboardExperimentsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardExperimentsAsyncPager: + r"""Lists TensorboardExperiments in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest`): + The request object. Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + parent (:class:`str`): + Required. The resource name of the + Tensorboard to list + TensorboardExperiments. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager: + Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.ListTensorboardExperimentsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_experiments, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
+ response = pagers.ListTensorboardExperimentsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_tensorboard_experiment( + self, + request: tensorboard_service.DeleteTensorboardExperimentRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TensorboardExperiment. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest`): + The request object. Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. + name (:class:`str`): + Required. The name of the TensorboardExperiment to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.DeleteTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_tensorboard_run( + self, + request: tensorboard_service.CreateTensorboardRunRequest = None, + *, + parent: str = None, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + tensorboard_run_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Creates a TensorboardRun. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest`): + The request object. 
Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. + parent (:class:`str`): + Required. The resource name of the Tensorboard to create + the TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardRun`): + Required. The TensorboardRun to + create. + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run_id (:class:`str`): + Required. The ID to use for the Tensorboard run, which + will become the final component of the Tensorboard run's + resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_run_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.CreateTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if tensorboard_run_id is not None: + request.tensorboard_run_id = tensorboard_run_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_tensorboard_run( + self, + request: tensorboard_service.GetTensorboardRunRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_run.TensorboardRun: + r"""Gets a TensorboardRun. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest`): + The request object. Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. + name (:class:`str`): + Required. The name of the TensorboardRun resource. 
+ Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.GetTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def update_tensorboard_run( + self, + request: tensorboard_service.UpdateTensorboardRunRequest = None, + *, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Updates a TensorboardRun. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest`): + The request object. Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. + tensorboard_run (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardRun`): + Required. The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.UpdateTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_run.name", request.tensorboard_run.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_tensorboard_runs( + self, + request: tensorboard_service.ListTensorboardRunsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardRunsAsyncPager: + r"""Lists TensorboardRuns in a Location. 
+ + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest`): + The request object. Request message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + parent (:class:`str`): + Required. The resource name of the + Tensorboard to list TensorboardRuns. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager: + Response message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.ListTensorboardRunsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_runs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardRunsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_tensorboard_run( + self, + request: tensorboard_service.DeleteTensorboardRunRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TensorboardRun. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest`): + The request object. Request message for + [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. + name (:class:`str`): + Required. The name of the TensorboardRun to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.DeleteTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def create_tensorboard_time_series( + self, + request: tensorboard_service.CreateTensorboardTimeSeriesRequest = None, + *, + parent: str = None, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Creates a TensorboardTimeSeries. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest`): + The request object. Request message for + [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. + parent (:class:`str`): + Required. The resource name of the TensorboardRun to + create the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_time_series (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries`): + Required. The TensorboardTimeSeries + to create. + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to times + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_tensorboard_time_series( + self, + request: tensorboard_service.GetTensorboardTimeSeriesRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_time_series.TensorboardTimeSeries: + r"""Gets a TensorboardTimeSeries. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest`): + The request object. Request message for + [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. + name (:class:`str`): + Required. The name of the TensorboardTimeSeries + resource. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to times + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.GetTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def update_tensorboard_time_series( + self, + request: tensorboard_service.UpdateTensorboardTimeSeriesRequest = None, + *, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Updates a TensorboardTimeSeries. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest`): + The request object. Request message for + [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. + tensorboard_time_series (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries`): + Required. The TensorboardTimeSeries' ``name`` field is + used to identify the TensorboardTimeSeries to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardTimeSeries resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to times + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ( + "tensorboard_time_series.name", + request.tensorboard_time_series.name, + ), + ) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def list_tensorboard_time_series( + self, + request: tensorboard_service.ListTensorboardTimeSeriesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardTimeSeriesAsyncPager: + r"""Lists TensorboardTimeSeries in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest`): + The request object. Request message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + parent (:class:`str`): + Required. The resource name of the + TensorboardRun to list + TensorboardTimeSeries. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager: + Response message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardTimeSeriesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_tensorboard_time_series( + self, + request: tensorboard_service.DeleteTensorboardTimeSeriesRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TensorboardTimeSeries. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest`): + The request object. Request message for + [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. + name (:class:`str`): + Required. 
The name of the TensorboardTimeSeries to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def read_tensorboard_time_series_data( + self, + request: tensorboard_service.ReadTensorboardTimeSeriesDataRequest = None, + *, + tensorboard_time_series: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: + r"""Reads a TensorboardTimeSeries' data. Data is returned in + paginated responses. By default, if the number of data points + stored is less than 1000, all data will be returned. Otherwise, + 1000 data points will be randomly selected from this time series + and returned. This value can be changed by changing + max_data_points. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest`): + The request object. Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + tensorboard_time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to read data from. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_time_series", request.tensorboard_time_series),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def read_tensorboard_blob_data( + self, + request: tensorboard_service.ReadTensorboardBlobDataRequest = None, + *, + time_series: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[tensorboard_service.ReadTensorboardBlobDataResponse]]: + r"""Gets bytes of TensorboardBlobs. + This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest`): + The request object. Request message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to list Blobs. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' + + This corresponds to the ``time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + AsyncIterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]: + Response message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([time_series]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.ReadTensorboardBlobDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if time_series is not None: + request.time_series = time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_tensorboard_blob_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("time_series", request.time_series),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def write_tensorboard_run_data( + self, + request: tensorboard_service.WriteTensorboardRunDataRequest = None, + *, + tensorboard_run: str = None, + time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardRunDataResponse: + r"""Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest`): + The request object. Request message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + tensorboard_run (:class:`str`): + Required. The resource name of the TensorboardRun to + write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + time_series_data (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]`): + Required. The TensorboardTimeSeries + data to write. Values with in a time + series are indexed by their step value. + Repeated writes to the same step will + overwrite the existing value for that + step. + The upper limit of data points per write + request is 5000. + + This corresponds to the ``time_series_data`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse: + Response message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, time_series_data]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.WriteTensorboardRunDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + + if time_series_data: + request.time_series_data.extend(time_series_data) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.write_tensorboard_run_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_run", request.tensorboard_run),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def export_tensorboard_time_series_data( + self, + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest = None, + *, + tensorboard_time_series: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ExportTensorboardTimeSeriesDataAsyncPager: + r"""Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest`): + The request object. Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + tensorboard_time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to export data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager: + Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_time_series", request.tensorboard_time_series),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ExportTensorboardTimeSeriesDataAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("TensorboardServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py new file mode 100644 index 0000000000..8395be0b16 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py @@ -0,0 +1,2647 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Iterable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_experiment as gca_tensorboard_experiment, +) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_time_series as gca_tensorboard_time_series, +) +from google.protobuf import empty_pb2 as empty # type: ignore 
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import TensorboardServiceGrpcTransport +from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport + + +class TensorboardServiceClientMeta(type): + """Metaclass for the TensorboardService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[TensorboardServiceTransport]] + _transport_registry["grpc"] = TensorboardServiceGrpcTransport + _transport_registry["grpc_asyncio"] = TensorboardServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[TensorboardServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class TensorboardServiceClient(metaclass=TensorboardServiceClientMeta): + """TensorboardService""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. 
+ """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TensorboardServiceTransport: + """Return the transport used by the client instance. 
+
+        Returns:
+            TensorboardServiceTransport: The transport used by the client instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def tensorboard_path(project: str, location: str, tensorboard: str,) -> str:
+        """Return a fully-qualified tensorboard string."""
+        return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(
+            project=project, location=location, tensorboard=tensorboard,
+        )
+
+    @staticmethod
+    def parse_tensorboard_path(path: str) -> Dict[str, str]:
+        """Parse a tensorboard path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def tensorboard_experiment_path(
+        project: str, location: str, tensorboard: str, experiment: str,
+    ) -> str:
+        """Return a fully-qualified tensorboard_experiment string."""
+        return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(
+            project=project,
+            location=location,
+            tensorboard=tensorboard,
+            experiment=experiment,
+        )
+
+    @staticmethod
+    def parse_tensorboard_experiment_path(path: str) -> Dict[str, str]:
+        """Parse a tensorboard_experiment path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)/experiments/(?P<experiment>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def tensorboard_run_path(
+        project: str, location: str, tensorboard: str, experiment: str, run: str,
+    ) -> str:
+        """Return a fully-qualified tensorboard_run string."""
+        return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(
+            project=project,
+            location=location,
+            tensorboard=tensorboard,
+            experiment=experiment,
+            run=run,
+        )
+
+    @staticmethod
+    def parse_tensorboard_run_path(path: str) -> Dict[str, str]:
+        """Parse a tensorboard_run path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)/experiments/(?P<experiment>.+?)/runs/(?P<run>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def tensorboard_time_series_path(
+        project: str,
+        location: str,
+        tensorboard: str,
+        experiment: str,
+        run: str,
+        time_series: str,
+    ) -> str:
+        """Return a fully-qualified tensorboard_time_series string."""
+        return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(
+            project=project,
+            location=location,
+            tensorboard=tensorboard,
+            experiment=experiment,
+            run=run,
+            time_series=time_series,
+        )
+
+    @staticmethod
+    def parse_tensorboard_time_series_path(path: str) -> Dict[str, str]:
+        """Parse a tensorboard_time_series path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)/experiments/(?P<experiment>.+?)/runs/(?P<run>.+?)/timeSeries/(?P<time_series>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str,) -> str:
+        """Return a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder,)
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str,) -> str:
+        """Return a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization,)
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str,) -> str:
+        """Return a fully-qualified project string."""
+        return "projects/{project}".format(project=project,)
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str,) -> str:
+        """Return a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project, location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, TensorboardServiceTransport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the tensorboard service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, TensorboardServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TensorboardServiceTransport): + # transport is a TensorboardServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_tensorboard( + self, + request: tensorboard_service.CreateTensorboardRequest = None, + *, + parent: str = None, + tensorboard: gca_tensorboard.Tensorboard = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a Tensorboard. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest): + The request object. Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. + parent (str): + Required. The resource name of the Location to create + the Tensorboard in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard to create. + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users’ training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardRequest): + request = tensorboard_service.CreateTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + def get_tensorboard( + self, + request: tensorboard_service.GetTensorboardRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard.Tensorboard: + r"""Gets a Tensorboard. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest): + The request object. Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. + name (str): + Required. The name of the Tensorboard resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Tensorboard: + Tensorboard is a physical database + that stores users’ training metrics. A + default Tensorboard is provided in each + region of a GCP project. If needed users + can also create extra Tensorboards in + their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardRequest): + request = tensorboard_service.GetTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_tensorboard( + self, + request: tensorboard_service.UpdateTensorboardRequest = None, + *, + tensorboard: gca_tensorboard.Tensorboard = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a Tensorboard. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest): + The request object. Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if + new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users’ training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.UpdateTensorboardRequest): + request = tensorboard_service.UpdateTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard is not None: + request.tensorboard = tensorboard + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard.name", request.tensorboard.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + def list_tensorboards( + self, + request: tensorboard_service.ListTensorboardsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardsPager: + r"""Lists Tensorboards in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): + The request object. Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + parent (str): + Required. The resource name of the + Location to list Tensorboards. 
Format: + 'projects/{project}/locations/{location}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsPager: + Response message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardsRequest): + request = tensorboard_service.ListTensorboardsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tensorboards] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard( + self, + request: tensorboard_service.DeleteTensorboardRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Tensorboard. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest): + The request object. Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. + name (str): + Required. The name of the Tensorboard to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardRequest): + request = tensorboard_service.DeleteTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def create_tensorboard_experiment( + self, + request: tensorboard_service.CreateTensorboardExperimentRequest = None, + *, + parent: str = None, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + tensorboard_experiment_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Creates a TensorboardExperiment. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest): + The request object. Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. + parent (str): + Required. The resource name of the Tensorboard to create + the TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + The TensorboardExperiment to create. + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment_id (str): + Required. The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_experiment_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, tensorboard_experiment, tensorboard_experiment_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.CreateTensorboardExperimentRequest + ): + request = tensorboard_service.CreateTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if tensorboard_experiment_id is not None: + request.tensorboard_experiment_id = tensorboard_experiment_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_tensorboard_experiment + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_tensorboard_experiment( + self, + request: tensorboard_service.GetTensorboardExperimentRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_experiment.TensorboardExperiment: + r"""Gets a TensorboardExperiment. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest): + The request object. Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. + name (str): + Required. The name of the TensorboardExperiment + resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardExperimentRequest): + request = tensorboard_service.GetTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.get_tensorboard_experiment + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_tensorboard_experiment( + self, + request: tensorboard_service.UpdateTensorboardExperimentRequest = None, + *, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Updates a TensorboardExperiment. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest): + The request object. Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + Required. 
The TensorboardExperiment's ``name`` field is + used to identify the TensorboardExperiment to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardExperimentRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.UpdateTensorboardExperimentRequest + ): + request = tensorboard_service.UpdateTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.update_tensorboard_experiment + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_experiment.name", request.tensorboard_experiment.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_tensorboard_experiments( + self, + request: tensorboard_service.ListTensorboardExperimentsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardExperimentsPager: + r"""Lists TensorboardExperiments in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): + The request object. Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + parent (str): + Required. The resource name of the + Tensorboard to list + TensorboardExperiments. 
Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager: + Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardExperimentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.ListTensorboardExperimentsRequest + ): + request = tensorboard_service.ListTensorboardExperimentsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[ + self._transport.list_tensorboard_experiments + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardExperimentsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard_experiment( + self, + request: tensorboard_service.DeleteTensorboardExperimentRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TensorboardExperiment. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest): + The request object. Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. + name (str): + Required. The name of the TensorboardExperiment to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.DeleteTensorboardExperimentRequest + ): + request = tensorboard_service.DeleteTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_tensorboard_experiment + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_tensorboard_run( + self, + request: tensorboard_service.CreateTensorboardRunRequest = None, + *, + parent: str = None, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + tensorboard_run_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Creates a TensorboardRun. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest): + The request object. Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. + parent (str): + Required. The resource name of the Tensorboard to create + the TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. The TensorboardRun to + create. + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run_id (str): + Required. The ID to use for the Tensorboard run, which + will become the final component of the Tensorboard run's + resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_run_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardRunRequest): + request = tensorboard_service.CreateTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if tensorboard_run_id is not None: + request.tensorboard_run_id = tensorboard_run_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_tensorboard_run( + self, + request: tensorboard_service.GetTensorboardRunRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_run.TensorboardRun: + r"""Gets a TensorboardRun. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest): + The request object. Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. + name (str): + Required. The name of the TensorboardRun resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardRunRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardRunRequest): + request = tensorboard_service.GetTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_tensorboard_run( + self, + request: tensorboard_service.UpdateTensorboardRunRequest = None, + *, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Updates a TensorboardRun. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest): + The request object. Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, tensorboard_service.UpdateTensorboardRunRequest): + request = tensorboard_service.UpdateTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_run.name", request.tensorboard_run.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_tensorboard_runs( + self, + request: tensorboard_service.ListTensorboardRunsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardRunsPager: + r"""Lists TensorboardRuns in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): + The request object. Request message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + parent (str): + Required. The resource name of the + Tensorboard to list TensorboardRuns. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsPager: + Response message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardRunsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardRunsRequest): + request = tensorboard_service.ListTensorboardRunsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_runs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardRunsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard_run( + self, + request: tensorboard_service.DeleteTensorboardRunRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TensorboardRun. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest): + The request object. Request message for + [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. + name (str): + Required. The name of the TensorboardRun to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardRunRequest): + request = tensorboard_service.DeleteTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+        return response
+
+    def create_tensorboard_time_series(
+        self,
+        request: tensorboard_service.CreateTensorboardTimeSeriesRequest = None,
+        *,
+        parent: str = None,
+        tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> gca_tensorboard_time_series.TensorboardTimeSeries:
+        r"""Creates a TensorboardTimeSeries.
+
+        Args:
+            request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest):
+                The request object. Request message for
+                [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries].
+            parent (str):
+                Required. The resource name of the TensorboardRun to
+                create the TensorboardTimeSeries in. Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries):
+                Required. The TensorboardTimeSeries
+                to create.
+
+                This corresponds to the ``tensorboard_time_series`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries:
+                TensorboardTimeSeries maps to time
+                series produced in training runs
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.CreateTensorboardTimeSeriesRequest + ): + request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_tensorboard_time_series + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_tensorboard_time_series( + self, + request: tensorboard_service.GetTensorboardTimeSeriesRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_time_series.TensorboardTimeSeries: + r"""Gets a TensorboardTimeSeries. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest): + The request object. 
 Request message for
+                [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries].
+            name (str):
+                Required. The name of the TensorboardTimeSeries
+                resource. Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries:
+                TensorboardTimeSeries maps to time
+                series produced in training runs
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a tensorboard_service.GetTensorboardTimeSeriesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, tensorboard_service.GetTensorboardTimeSeriesRequest):
+            request = tensorboard_service.GetTensorboardTimeSeriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+ rpc = self._transport._wrapped_methods[ + self._transport.get_tensorboard_time_series + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_tensorboard_time_series( + self, + request: tensorboard_service.UpdateTensorboardTimeSeriesRequest = None, + *, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Updates a TensorboardTimeSeries. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest): + The request object. Request message for + [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. + tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries' ``name`` field is + used to identify the TensorboardTimeSeries to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardTimeSeries resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. 
 A field
+                will be overwritten if it is in the mask. If the user
+                does not provide a mask then all fields will be
+                overwritten if new values are specified.
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries:
+                TensorboardTimeSeries maps to time
+                series produced in training runs
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([tensorboard_time_series, update_mask])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a tensorboard_service.UpdateTensorboardTimeSeriesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(
+            request, tensorboard_service.UpdateTensorboardTimeSeriesRequest
+        ):
+            request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if tensorboard_time_series is not None:
+            request.tensorboard_time_series = tensorboard_time_series
+        if update_mask is not None:
+            request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+ rpc = self._transport._wrapped_methods[ + self._transport.update_tensorboard_time_series + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ( + "tensorboard_time_series.name", + request.tensorboard_time_series.name, + ), + ) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_tensorboard_time_series( + self, + request: tensorboard_service.ListTensorboardTimeSeriesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardTimeSeriesPager: + r"""Lists TensorboardTimeSeries in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): + The request object. Request message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + parent (str): + Required. The resource name of the + TensorboardRun to list + TensorboardTimeSeries. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager: + Response message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.ListTensorboardTimeSeriesRequest + ): + request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_tensorboard_time_series + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListTensorboardTimeSeriesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard_time_series( + self, + request: tensorboard_service.DeleteTensorboardTimeSeriesRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TensorboardTimeSeries. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest): + The request object. Request message for + [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. + name (str): + Required. The name of the TensorboardTimeSeries to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.DeleteTensorboardTimeSeriesRequest + ): + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_tensorboard_time_series + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def read_tensorboard_time_series_data( + self, + request: tensorboard_service.ReadTensorboardTimeSeriesDataRequest = None, + *, + tensorboard_time_series: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: + r"""Reads a TensorboardTimeSeries' data. Data is returned in + paginated responses. By default, if the number of data points + stored is less than 1000, all data will be returned. Otherwise, + 1000 data points will be randomly selected from this time series + and returned. This value can be changed by changing + max_data_points. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest): + The request object. Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries + to read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ReadTensorboardTimeSeriesDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.ReadTensorboardTimeSeriesDataRequest + ): + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.read_tensorboard_time_series_data + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_time_series", request.tensorboard_time_series),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def read_tensorboard_blob_data( + self, + request: tensorboard_service.ReadTensorboardBlobDataRequest = None, + *, + time_series: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[tensorboard_service.ReadTensorboardBlobDataResponse]: + r"""Gets bytes of TensorboardBlobs. 
+ This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest): + The request object. Request message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + time_series (str): + Required. The resource name of the TensorboardTimeSeries + to list Blobs. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' + + This corresponds to the ``time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]: + Response message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([time_series]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ReadTensorboardBlobDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, tensorboard_service.ReadTensorboardBlobDataRequest): + request = tensorboard_service.ReadTensorboardBlobDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if time_series is not None: + request.time_series = time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.read_tensorboard_blob_data + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("time_series", request.time_series),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def write_tensorboard_run_data( + self, + request: tensorboard_service.WriteTensorboardRunDataRequest = None, + *, + tensorboard_run: str = None, + time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardRunDataResponse: + r"""Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + Args: + request (google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest): + The request object. Request message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + tensorboard_run (str): + Required. The resource name of the TensorboardRun to + write data to. 
 Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+
+                This corresponds to the ``tensorboard_run`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]):
+                Required. The TensorboardTimeSeries
+                data to write. Values within a time
+                series are indexed by their step value.
+                Repeated writes to the same step will
+                overwrite the existing value for that
+                step.
+                The upper limit of data points per write
+                request is 5000.
+
+                This corresponds to the ``time_series_data`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse:
+                Response message for
+                [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([tensorboard_run, time_series_data])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a tensorboard_service.WriteTensorboardRunDataRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+ if not isinstance(request, tensorboard_service.WriteTensorboardRunDataRequest): + request = tensorboard_service.WriteTensorboardRunDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if time_series_data is not None: + request.time_series_data = time_series_data + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.write_tensorboard_run_data + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_run", request.tensorboard_run),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def export_tensorboard_time_series_data( + self, + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest = None, + *, + tensorboard_time_series: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ExportTensorboardTimeSeriesDataPager: + r"""Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): + The request object. Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries + to export data from. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager: + Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ExportTensorboardTimeSeriesDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.ExportTensorboardTimeSeriesDataRequest + ): + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.export_tensorboard_time_series_data + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_time_series", request.tensorboard_time_series),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ExportTensorboardTimeSeriesDataPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("TensorboardServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py new file mode 100644 index 0000000000..acc2c40676 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py @@ -0,0 +1,700 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series + + +class ListTensorboardsPager: + """A pager for iterating through ``list_tensorboards`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboards`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboards`` requests and continue to iterate + through the ``tensorboards`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., tensorboard_service.ListTensorboardsResponse], + request: tensorboard_service.ListTensorboardsRequest, + response: tensorboard_service.ListTensorboardsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[tensorboard_service.ListTensorboardsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[tensorboard.Tensorboard]: + for page in self.pages: + yield from page.tensorboards + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTensorboardsAsyncPager: + """A pager for iterating through ``list_tensorboards`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboards`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboards`` requests and continue to iterate + through the ``tensorboards`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardsResponse]], + request: tensorboard_service.ListTensorboardsRequest, + response: tensorboard_service.ListTensorboardsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[tensorboard_service.ListTensorboardsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[tensorboard.Tensorboard]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboards: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTensorboardExperimentsPager: + """A pager for iterating through ``list_tensorboard_experiments`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_experiments`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardExperiments`` requests and continue to iterate + through the ``tensorboard_experiments`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., tensorboard_service.ListTensorboardExperimentsResponse], + request: tensorboard_service.ListTensorboardExperimentsRequest, + response: tensorboard_service.ListTensorboardExperimentsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[tensorboard_service.ListTensorboardExperimentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[tensorboard_experiment.TensorboardExperiment]: + for page in self.pages: + yield from page.tensorboard_experiments + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTensorboardExperimentsAsyncPager: + """A pager for iterating through ``list_tensorboard_experiments`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_experiments`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardExperiments`` requests and continue to iterate + through the ``tensorboard_experiments`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[tensorboard_service.ListTensorboardExperimentsResponse] + ], + request: tensorboard_service.ListTensorboardExperimentsRequest, + response: tensorboard_service.ListTensorboardExperimentsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[tensorboard_service.ListTensorboardExperimentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[tensorboard_experiment.TensorboardExperiment]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_experiments: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTensorboardRunsPager: + """A pager for iterating through ``list_tensorboard_runs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_runs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardRuns`` requests and continue to iterate + through the ``tensorboard_runs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., tensorboard_service.ListTensorboardRunsResponse], + request: tensorboard_service.ListTensorboardRunsRequest, + response: tensorboard_service.ListTensorboardRunsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardRunsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[tensorboard_service.ListTensorboardRunsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[tensorboard_run.TensorboardRun]: + for page in self.pages: + yield from page.tensorboard_runs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTensorboardRunsAsyncPager: + """A pager for iterating through ``list_tensorboard_runs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_runs`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardRuns`` requests and continue to iterate + through the ``tensorboard_runs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[tensorboard_service.ListTensorboardRunsResponse] + ], + request: tensorboard_service.ListTensorboardRunsRequest, + response: tensorboard_service.ListTensorboardRunsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardRunsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[tensorboard_service.ListTensorboardRunsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[tensorboard_run.TensorboardRun]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_runs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTensorboardTimeSeriesPager: + """A pager for iterating through ``list_tensorboard_time_series`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_time_series`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardTimeSeries`` requests and continue to iterate + through the ``tensorboard_time_series`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., tensorboard_service.ListTensorboardTimeSeriesResponse], + request: tensorboard_service.ListTensorboardTimeSeriesRequest, + response: tensorboard_service.ListTensorboardTimeSeriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[tensorboard_service.ListTensorboardTimeSeriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[tensorboard_time_series.TensorboardTimeSeries]: + for page in self.pages: + yield from page.tensorboard_time_series + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTensorboardTimeSeriesAsyncPager: + """A pager for iterating through ``list_tensorboard_time_series`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_time_series`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardTimeSeries`` requests and continue to iterate + through the ``tensorboard_time_series`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse] + ], + request: tensorboard_service.ListTensorboardTimeSeriesRequest, + response: tensorboard_service.ListTensorboardTimeSeriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[tensorboard_service.ListTensorboardTimeSeriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[tensorboard_time_series.TensorboardTimeSeries]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_time_series: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ExportTensorboardTimeSeriesDataPager: + """A pager for iterating through ``export_tensorboard_time_series_data`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` object, and + provides an ``__iter__`` method to iterate through its + ``time_series_data_points`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ExportTensorboardTimeSeriesData`` requests and continue to iterate + through the ``time_series_data_points`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., tensorboard_service.ExportTensorboardTimeSeriesDataResponse + ], + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, + response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[tensorboard_data.TimeSeriesDataPoint]: + for page in self.pages: + yield from page.time_series_data_points + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ExportTensorboardTimeSeriesDataAsyncPager: + """A pager for iterating through ``export_tensorboard_time_series_data`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``time_series_data_points`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ExportTensorboardTimeSeriesData`` requests and continue to iterate + through the ``time_series_data_points`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse] + ], + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, + response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[tensorboard_data.TimeSeriesDataPoint]: + async def async_generator(): + async for page in self.pages: + for response in page.time_series_data_points: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py new file mode 100644 index 0000000000..86ffc7d6b2 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import TensorboardServiceTransport +from .grpc import TensorboardServiceGrpcTransport +from .grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[TensorboardServiceTransport]] +_transport_registry["grpc"] = TensorboardServiceGrpcTransport +_transport_registry["grpc_asyncio"] = TensorboardServiceGrpcAsyncIOTransport + +__all__ = ( + "TensorboardServiceTransport", + "TensorboardServiceGrpcTransport", + "TensorboardServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py new file mode 100644 index 0000000000..2e2dea1764 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py @@ -0,0 +1,509 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_experiment as gca_tensorboard_experiment, +) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_time_series as gca_tensorboard_time_series, +) +from google.longrunning import operations_pb2 as operations # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class TensorboardServiceTransport(abc.ABC): + """Abstract transport class for TensorboardService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=self._scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_tensorboard: gapic_v1.method.wrap_method( + self.create_tensorboard, default_timeout=None, client_info=client_info, + ), + self.get_tensorboard: gapic_v1.method.wrap_method( + self.get_tensorboard, default_timeout=None, client_info=client_info, + ), + self.update_tensorboard: gapic_v1.method.wrap_method( + self.update_tensorboard, default_timeout=None, client_info=client_info, + ), + self.list_tensorboards: gapic_v1.method.wrap_method( + self.list_tensorboards, default_timeout=None, client_info=client_info, + ), + self.delete_tensorboard: gapic_v1.method.wrap_method( + self.delete_tensorboard, default_timeout=None, client_info=client_info, + ), + self.create_tensorboard_experiment: gapic_v1.method.wrap_method( + self.create_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_experiment: gapic_v1.method.wrap_method( + self.get_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_experiment: gapic_v1.method.wrap_method( + self.update_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboard_experiments: gapic_v1.method.wrap_method( + self.list_tensorboard_experiments, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_experiment: gapic_v1.method.wrap_method( + self.delete_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_run: gapic_v1.method.wrap_method( + self.create_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_run: gapic_v1.method.wrap_method( + self.get_tensorboard_run, default_timeout=None, client_info=client_info, + ), + self.update_tensorboard_run: gapic_v1.method.wrap_method( + self.update_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboard_runs: gapic_v1.method.wrap_method( + 
self.list_tensorboard_runs, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_run: gapic_v1.method.wrap_method( + self.delete_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_time_series: gapic_v1.method.wrap_method( + self.create_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_time_series: gapic_v1.method.wrap_method( + self.get_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_time_series: gapic_v1.method.wrap_method( + self.update_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboard_time_series: gapic_v1.method.wrap_method( + self.list_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_time_series: gapic_v1.method.wrap_method( + self.delete_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.read_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.read_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), + self.read_tensorboard_blob_data: gapic_v1.method.wrap_method( + self.read_tensorboard_blob_data, + default_timeout=None, + client_info=client_info, + ), + self.write_tensorboard_run_data: gapic_v1.method.wrap_method( + self.write_tensorboard_run_data, + default_timeout=None, + client_info=client_info, + ), + self.export_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.export_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_tensorboard( + self, + ) -> typing.Callable[ + [tensorboard_service.CreateTensorboardRequest], + 
typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_tensorboard( + self, + ) -> typing.Callable[ + [tensorboard_service.GetTensorboardRequest], + typing.Union[ + tensorboard.Tensorboard, typing.Awaitable[tensorboard.Tensorboard] + ], + ]: + raise NotImplementedError() + + @property + def update_tensorboard( + self, + ) -> typing.Callable[ + [tensorboard_service.UpdateTensorboardRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def list_tensorboards( + self, + ) -> typing.Callable[ + [tensorboard_service.ListTensorboardsRequest], + typing.Union[ + tensorboard_service.ListTensorboardsResponse, + typing.Awaitable[tensorboard_service.ListTensorboardsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_tensorboard( + self, + ) -> typing.Callable[ + [tensorboard_service.DeleteTensorboardRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def create_tensorboard_experiment( + self, + ) -> typing.Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + typing.Union[ + gca_tensorboard_experiment.TensorboardExperiment, + typing.Awaitable[gca_tensorboard_experiment.TensorboardExperiment], + ], + ]: + raise NotImplementedError() + + @property + def get_tensorboard_experiment( + self, + ) -> typing.Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + typing.Union[ + tensorboard_experiment.TensorboardExperiment, + typing.Awaitable[tensorboard_experiment.TensorboardExperiment], + ], + ]: + raise NotImplementedError() + + @property + def update_tensorboard_experiment( + self, + ) -> typing.Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + typing.Union[ + gca_tensorboard_experiment.TensorboardExperiment, + 
typing.Awaitable[gca_tensorboard_experiment.TensorboardExperiment], + ], + ]: + raise NotImplementedError() + + @property + def list_tensorboard_experiments( + self, + ) -> typing.Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + typing.Union[ + tensorboard_service.ListTensorboardExperimentsResponse, + typing.Awaitable[tensorboard_service.ListTensorboardExperimentsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_tensorboard_experiment( + self, + ) -> typing.Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def create_tensorboard_run( + self, + ) -> typing.Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + typing.Union[ + gca_tensorboard_run.TensorboardRun, + typing.Awaitable[gca_tensorboard_run.TensorboardRun], + ], + ]: + raise NotImplementedError() + + @property + def get_tensorboard_run( + self, + ) -> typing.Callable[ + [tensorboard_service.GetTensorboardRunRequest], + typing.Union[ + tensorboard_run.TensorboardRun, + typing.Awaitable[tensorboard_run.TensorboardRun], + ], + ]: + raise NotImplementedError() + + @property + def update_tensorboard_run( + self, + ) -> typing.Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + typing.Union[ + gca_tensorboard_run.TensorboardRun, + typing.Awaitable[gca_tensorboard_run.TensorboardRun], + ], + ]: + raise NotImplementedError() + + @property + def list_tensorboard_runs( + self, + ) -> typing.Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + typing.Union[ + tensorboard_service.ListTensorboardRunsResponse, + typing.Awaitable[tensorboard_service.ListTensorboardRunsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_tensorboard_run( + self, + ) -> typing.Callable[ + [tensorboard_service.DeleteTensorboardRunRequest], + typing.Union[operations.Operation, 
typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def create_tensorboard_time_series( + self, + ) -> typing.Callable[ + [tensorboard_service.CreateTensorboardTimeSeriesRequest], + typing.Union[ + gca_tensorboard_time_series.TensorboardTimeSeries, + typing.Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries], + ], + ]: + raise NotImplementedError() + + @property + def get_tensorboard_time_series( + self, + ) -> typing.Callable[ + [tensorboard_service.GetTensorboardTimeSeriesRequest], + typing.Union[ + tensorboard_time_series.TensorboardTimeSeries, + typing.Awaitable[tensorboard_time_series.TensorboardTimeSeries], + ], + ]: + raise NotImplementedError() + + @property + def update_tensorboard_time_series( + self, + ) -> typing.Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + typing.Union[ + gca_tensorboard_time_series.TensorboardTimeSeries, + typing.Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries], + ], + ]: + raise NotImplementedError() + + @property + def list_tensorboard_time_series( + self, + ) -> typing.Callable[ + [tensorboard_service.ListTensorboardTimeSeriesRequest], + typing.Union[ + tensorboard_service.ListTensorboardTimeSeriesResponse, + typing.Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_tensorboard_time_series( + self, + ) -> typing.Callable[ + [tensorboard_service.DeleteTensorboardTimeSeriesRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def read_tensorboard_time_series_data( + self, + ) -> typing.Callable[ + [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], + typing.Union[ + tensorboard_service.ReadTensorboardTimeSeriesDataResponse, + typing.Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse], + ], + ]: + raise NotImplementedError() + + @property + def 
read_tensorboard_blob_data( + self, + ) -> typing.Callable[ + [tensorboard_service.ReadTensorboardBlobDataRequest], + typing.Union[ + tensorboard_service.ReadTensorboardBlobDataResponse, + typing.Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse], + ], + ]: + raise NotImplementedError() + + @property + def write_tensorboard_run_data( + self, + ) -> typing.Callable[ + [tensorboard_service.WriteTensorboardRunDataRequest], + typing.Union[ + tensorboard_service.WriteTensorboardRunDataResponse, + typing.Awaitable[tensorboard_service.WriteTensorboardRunDataResponse], + ], + ]: + raise NotImplementedError() + + @property + def export_tensorboard_time_series_data( + self, + ) -> typing.Callable[ + [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], + typing.Union[ + tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + typing.Awaitable[ + tensorboard_service.ExportTensorboardTimeSeriesDataResponse + ], + ], + ]: + raise NotImplementedError() + + +__all__ = ("TensorboardServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py new file mode 100644 index 0000000000..02f697b2ae --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py @@ -0,0 +1,962 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_experiment as gca_tensorboard_experiment, +) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_time_series as gca_tensorboard_time_series, +) +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO + + +class TensorboardServiceGrpcTransport(TensorboardServiceTransport): + """gRPC backend transport for TensorboardService. + + TensorboardService + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_tensorboard( + self, + ) -> Callable[[tensorboard_service.CreateTensorboardRequest], operations.Operation]: + r"""Return a callable for the create tensorboard method over gRPC. + + Creates a Tensorboard. + + Returns: + Callable[[~.CreateTensorboardRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server.
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_tensorboard" not in self._stubs: + self._stubs["create_tensorboard"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboard", + request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_tensorboard"] + + @property + def get_tensorboard( + self, + ) -> Callable[[tensorboard_service.GetTensorboardRequest], tensorboard.Tensorboard]: + r"""Return a callable for the get tensorboard method over gRPC. + + Gets a Tensorboard. + + Returns: + Callable[[~.GetTensorboardRequest], + ~.Tensorboard]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_tensorboard" not in self._stubs: + self._stubs["get_tensorboard"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboard", + request_serializer=tensorboard_service.GetTensorboardRequest.serialize, + response_deserializer=tensorboard.Tensorboard.deserialize, + ) + return self._stubs["get_tensorboard"] + + @property + def update_tensorboard( + self, + ) -> Callable[[tensorboard_service.UpdateTensorboardRequest], operations.Operation]: + r"""Return a callable for the update tensorboard method over gRPC. + + Updates a Tensorboard. + + Returns: + Callable[[~.UpdateTensorboardRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_tensorboard" not in self._stubs: + self._stubs["update_tensorboard"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboard", + request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_tensorboard"] + + @property + def list_tensorboards( + self, + ) -> Callable[ + [tensorboard_service.ListTensorboardsRequest], + tensorboard_service.ListTensorboardsResponse, + ]: + r"""Return a callable for the list tensorboards method over gRPC. + + Lists Tensorboards in a Location. + + Returns: + Callable[[~.ListTensorboardsRequest], + ~.ListTensorboardsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_tensorboards" not in self._stubs: + self._stubs["list_tensorboards"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboards", + request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, + ) + return self._stubs["list_tensorboards"] + + @property + def delete_tensorboard( + self, + ) -> Callable[[tensorboard_service.DeleteTensorboardRequest], operations.Operation]: + r"""Return a callable for the delete tensorboard method over gRPC. + + Deletes a Tensorboard. + + Returns: + Callable[[~.DeleteTensorboardRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_tensorboard" not in self._stubs: + self._stubs["delete_tensorboard"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboard", + request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_tensorboard"] + + @property + def create_tensorboard_experiment( + self, + ) -> Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + gca_tensorboard_experiment.TensorboardExperiment, + ]: + r"""Return a callable for the create tensorboard experiment method over gRPC. + + Creates a TensorboardExperiment. + + Returns: + Callable[[~.CreateTensorboardExperimentRequest], + ~.TensorboardExperiment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_tensorboard_experiment" not in self._stubs: + self._stubs[ + "create_tensorboard_experiment" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardExperiment", + request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs["create_tensorboard_experiment"] + + @property + def get_tensorboard_experiment( + self, + ) -> Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + tensorboard_experiment.TensorboardExperiment, + ]: + r"""Return a callable for the get tensorboard experiment method over gRPC. + + Gets a TensorboardExperiment. 
+ + Returns: + Callable[[~.GetTensorboardExperimentRequest], + ~.TensorboardExperiment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_tensorboard_experiment" not in self._stubs: + self._stubs["get_tensorboard_experiment"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardExperiment", + request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, + response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs["get_tensorboard_experiment"] + + @property + def update_tensorboard_experiment( + self, + ) -> Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + gca_tensorboard_experiment.TensorboardExperiment, + ]: + r"""Return a callable for the update tensorboard experiment method over gRPC. + + Updates a TensorboardExperiment. + + Returns: + Callable[[~.UpdateTensorboardExperimentRequest], + ~.TensorboardExperiment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_tensorboard_experiment" not in self._stubs: + self._stubs[ + "update_tensorboard_experiment" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardExperiment", + request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs["update_tensorboard_experiment"] + + @property + def list_tensorboard_experiments( + self, + ) -> Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + tensorboard_service.ListTensorboardExperimentsResponse, + ]: + r"""Return a callable for the list tensorboard experiments method over gRPC. + + Lists TensorboardExperiments in a Location. + + Returns: + Callable[[~.ListTensorboardExperimentsRequest], + ~.ListTensorboardExperimentsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_tensorboard_experiments" not in self._stubs: + self._stubs["list_tensorboard_experiments"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardExperiments", + request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, + ) + return self._stubs["list_tensorboard_experiments"] + + @property + def delete_tensorboard_experiment( + self, + ) -> Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], operations.Operation + ]: + r"""Return a callable for the delete tensorboard experiment method over gRPC. + + Deletes a TensorboardExperiment. 
+ + Returns: + Callable[[~.DeleteTensorboardExperimentRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_tensorboard_experiment" not in self._stubs: + self._stubs[ + "delete_tensorboard_experiment" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardExperiment", + request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_tensorboard_experiment"] + + @property + def create_tensorboard_run( + self, + ) -> Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + gca_tensorboard_run.TensorboardRun, + ]: + r"""Return a callable for the create tensorboard run method over gRPC. + + Creates a TensorboardRun. + + Returns: + Callable[[~.CreateTensorboardRunRequest], + ~.TensorboardRun]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_tensorboard_run" not in self._stubs: + self._stubs["create_tensorboard_run"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardRun", + request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs["create_tensorboard_run"] + + @property + def get_tensorboard_run( + self, + ) -> Callable[ + [tensorboard_service.GetTensorboardRunRequest], tensorboard_run.TensorboardRun + ]: + r"""Return a callable for the get tensorboard run method over gRPC. + + Gets a TensorboardRun. + + Returns: + Callable[[~.GetTensorboardRunRequest], + ~.TensorboardRun]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_tensorboard_run" not in self._stubs: + self._stubs["get_tensorboard_run"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardRun", + request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, + response_deserializer=tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs["get_tensorboard_run"] + + @property + def update_tensorboard_run( + self, + ) -> Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + gca_tensorboard_run.TensorboardRun, + ]: + r"""Return a callable for the update tensorboard run method over gRPC. + + Updates a TensorboardRun. + + Returns: + Callable[[~.UpdateTensorboardRunRequest], + ~.TensorboardRun]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_tensorboard_run" not in self._stubs: + self._stubs["update_tensorboard_run"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardRun", + request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs["update_tensorboard_run"] + + @property + def list_tensorboard_runs( + self, + ) -> Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + tensorboard_service.ListTensorboardRunsResponse, + ]: + r"""Return a callable for the list tensorboard runs method over gRPC. + + Lists TensorboardRuns in a Location. + + Returns: + Callable[[~.ListTensorboardRunsRequest], + ~.ListTensorboardRunsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_tensorboard_runs" not in self._stubs: + self._stubs["list_tensorboard_runs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardRuns", + request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, + ) + return self._stubs["list_tensorboard_runs"] + + @property + def delete_tensorboard_run( + self, + ) -> Callable[ + [tensorboard_service.DeleteTensorboardRunRequest], operations.Operation + ]: + r"""Return a callable for the delete tensorboard run method over gRPC. + + Deletes a TensorboardRun. + + Returns: + Callable[[~.DeleteTensorboardRunRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_tensorboard_run" not in self._stubs: + self._stubs["delete_tensorboard_run"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardRun", + request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_tensorboard_run"] + + @property + def create_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.CreateTensorboardTimeSeriesRequest], + gca_tensorboard_time_series.TensorboardTimeSeries, + ]: + r"""Return a callable for the create tensorboard time series method over gRPC. + + Creates a TensorboardTimeSeries. + + Returns: + Callable[[~.CreateTensorboardTimeSeriesRequest], + ~.TensorboardTimeSeries]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_tensorboard_time_series" not in self._stubs: + self._stubs[ + "create_tensorboard_time_series" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardTimeSeries", + request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs["create_tensorboard_time_series"] + + @property + def get_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.GetTensorboardTimeSeriesRequest], + tensorboard_time_series.TensorboardTimeSeries, + ]: + r"""Return a callable for the get tensorboard time series method over gRPC. 
+ + Gets a TensorboardTimeSeries. + + Returns: + Callable[[~.GetTensorboardTimeSeriesRequest], + ~.TensorboardTimeSeries]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_tensorboard_time_series" not in self._stubs: + self._stubs["get_tensorboard_time_series"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardTimeSeries", + request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs["get_tensorboard_time_series"] + + @property + def update_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + gca_tensorboard_time_series.TensorboardTimeSeries, + ]: + r"""Return a callable for the update tensorboard time series method over gRPC. + + Updates a TensorboardTimeSeries. + + Returns: + Callable[[~.UpdateTensorboardTimeSeriesRequest], + ~.TensorboardTimeSeries]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_tensorboard_time_series" not in self._stubs: + self._stubs[ + "update_tensorboard_time_series" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardTimeSeries", + request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs["update_tensorboard_time_series"] + + @property + def list_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.ListTensorboardTimeSeriesRequest], + tensorboard_service.ListTensorboardTimeSeriesResponse, + ]: + r"""Return a callable for the list tensorboard time series method over gRPC. + + Lists TensorboardTimeSeries in a Location. + + Returns: + Callable[[~.ListTensorboardTimeSeriesRequest], + ~.ListTensorboardTimeSeriesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_tensorboard_time_series" not in self._stubs: + self._stubs["list_tensorboard_time_series"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardTimeSeries", + request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs["list_tensorboard_time_series"] + + @property + def delete_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.DeleteTensorboardTimeSeriesRequest], operations.Operation + ]: + r"""Return a callable for the delete tensorboard time series method over gRPC. + + Deletes a TensorboardTimeSeries. 
+ + Returns: + Callable[[~.DeleteTensorboardTimeSeriesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_tensorboard_time_series" not in self._stubs: + self._stubs[ + "delete_tensorboard_time_series" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardTimeSeries", + request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_tensorboard_time_series"] + + @property + def read_tensorboard_time_series_data( + self, + ) -> Callable[ + [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], + tensorboard_service.ReadTensorboardTimeSeriesDataResponse, + ]: + r"""Return a callable for the read tensorboard time series + data method over gRPC. + + Reads a TensorboardTimeSeries' data. Data is returned in + paginated responses. By default, if the number of data points + stored is less than 1000, all data will be returned. Otherwise, + 1000 data points will be randomly selected from this time series + and returned. This value can be changed by changing + max_data_points. + + Returns: + Callable[[~.ReadTensorboardTimeSeriesDataRequest], + ~.ReadTensorboardTimeSeriesDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "read_tensorboard_time_series_data" not in self._stubs: + self._stubs[ + "read_tensorboard_time_series_data" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardTimeSeriesData", + request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs["read_tensorboard_time_series_data"] + + @property + def read_tensorboard_blob_data( + self, + ) -> Callable[ + [tensorboard_service.ReadTensorboardBlobDataRequest], + tensorboard_service.ReadTensorboardBlobDataResponse, + ]: + r"""Return a callable for the read tensorboard blob data method over gRPC. + + Gets bytes of TensorboardBlobs. + This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + Returns: + Callable[[~.ReadTensorboardBlobDataRequest], + ~.ReadTensorboardBlobDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "read_tensorboard_blob_data" not in self._stubs: + self._stubs["read_tensorboard_blob_data"] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardBlobData", + request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, + ) + return self._stubs["read_tensorboard_blob_data"] + + @property + def write_tensorboard_run_data( + self, + ) -> Callable[ + [tensorboard_service.WriteTensorboardRunDataRequest], + tensorboard_service.WriteTensorboardRunDataResponse, + ]: + r"""Return a callable for the write tensorboard run data method over gRPC. + + Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardRunDataRequest], + ~.WriteTensorboardRunDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "write_tensorboard_run_data" not in self._stubs: + self._stubs["write_tensorboard_run_data"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardRunData", + request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, + ) + return self._stubs["write_tensorboard_run_data"] + + @property + def export_tensorboard_time_series_data( + self, + ) -> Callable[ + [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], + tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + ]: + r"""Return a callable for the export tensorboard time series + data method over gRPC. 
+ + Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + Returns: + Callable[[~.ExportTensorboardTimeSeriesDataRequest], + ~.ExportTensorboardTimeSeriesDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "export_tensorboard_time_series_data" not in self._stubs: + self._stubs[ + "export_tensorboard_time_series_data" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ExportTensorboardTimeSeriesData", + request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs["export_tensorboard_time_series_data"] + + +__all__ = ("TensorboardServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..d49895cdad --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py @@ -0,0 +1,980 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_experiment as gca_tensorboard_experiment, +) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_time_series as gca_tensorboard_time_series, +) +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import TensorboardServiceGrpcTransport + + +class TensorboardServiceGrpcAsyncIOTransport(TensorboardServiceTransport): + """gRPC AsyncIO backend transport for TensorboardService. + + TensorboardService + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. 
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. 
+ return self._operations_client + + @property + def create_tensorboard( + self, + ) -> Callable[ + [tensorboard_service.CreateTensorboardRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create tensorboard method over gRPC. + + Creates a Tensorboard. + + Returns: + Callable[[~.CreateTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_tensorboard" not in self._stubs: + self._stubs["create_tensorboard"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboard", + request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_tensorboard"] + + @property + def get_tensorboard( + self, + ) -> Callable[ + [tensorboard_service.GetTensorboardRequest], Awaitable[tensorboard.Tensorboard] + ]: + r"""Return a callable for the get tensorboard method over gRPC. + + Gets a Tensorboard. + + Returns: + Callable[[~.GetTensorboardRequest], + Awaitable[~.Tensorboard]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_tensorboard" not in self._stubs: + self._stubs["get_tensorboard"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboard", + request_serializer=tensorboard_service.GetTensorboardRequest.serialize, + response_deserializer=tensorboard.Tensorboard.deserialize, + ) + return self._stubs["get_tensorboard"] + + @property + def update_tensorboard( + self, + ) -> Callable[ + [tensorboard_service.UpdateTensorboardRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the update tensorboard method over gRPC. + + Updates a Tensorboard. + + Returns: + Callable[[~.UpdateTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_tensorboard" not in self._stubs: + self._stubs["update_tensorboard"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboard", + request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_tensorboard"] + + @property + def list_tensorboards( + self, + ) -> Callable[ + [tensorboard_service.ListTensorboardsRequest], + Awaitable[tensorboard_service.ListTensorboardsResponse], + ]: + r"""Return a callable for the list tensorboards method over gRPC. + + Lists Tensorboards in a Location. + + Returns: + Callable[[~.ListTensorboardsRequest], + Awaitable[~.ListTensorboardsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_tensorboards" not in self._stubs: + self._stubs["list_tensorboards"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboards", + request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, + ) + return self._stubs["list_tensorboards"] + + @property + def delete_tensorboard( + self, + ) -> Callable[ + [tensorboard_service.DeleteTensorboardRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the delete tensorboard method over gRPC. + + Deletes a Tensorboard. + + Returns: + Callable[[~.DeleteTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_tensorboard" not in self._stubs: + self._stubs["delete_tensorboard"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboard", + request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_tensorboard"] + + @property + def create_tensorboard_experiment( + self, + ) -> Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + Awaitable[gca_tensorboard_experiment.TensorboardExperiment], + ]: + r"""Return a callable for the create tensorboard experiment method over gRPC. + + Creates a TensorboardExperiment. + + Returns: + Callable[[~.CreateTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_tensorboard_experiment" not in self._stubs: + self._stubs[ + "create_tensorboard_experiment" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardExperiment", + request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs["create_tensorboard_experiment"] + + @property + def get_tensorboard_experiment( + self, + ) -> Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + Awaitable[tensorboard_experiment.TensorboardExperiment], + ]: + r"""Return a callable for the get tensorboard experiment method over gRPC. + + Gets a TensorboardExperiment. + + Returns: + Callable[[~.GetTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_tensorboard_experiment" not in self._stubs: + self._stubs["get_tensorboard_experiment"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardExperiment", + request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, + response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs["get_tensorboard_experiment"] + + @property + def update_tensorboard_experiment( + self, + ) -> Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + Awaitable[gca_tensorboard_experiment.TensorboardExperiment], + ]: + r"""Return a callable for the update tensorboard experiment method over gRPC. 
+ + Updates a TensorboardExperiment. + + Returns: + Callable[[~.UpdateTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_tensorboard_experiment" not in self._stubs: + self._stubs[ + "update_tensorboard_experiment" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardExperiment", + request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs["update_tensorboard_experiment"] + + @property + def list_tensorboard_experiments( + self, + ) -> Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + Awaitable[tensorboard_service.ListTensorboardExperimentsResponse], + ]: + r"""Return a callable for the list tensorboard experiments method over gRPC. + + Lists TensorboardExperiments in a Location. + + Returns: + Callable[[~.ListTensorboardExperimentsRequest], + Awaitable[~.ListTensorboardExperimentsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_tensorboard_experiments" not in self._stubs: + self._stubs["list_tensorboard_experiments"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardExperiments", + request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, + ) + return self._stubs["list_tensorboard_experiments"] + + @property + def delete_tensorboard_experiment( + self, + ) -> Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the delete tensorboard experiment method over gRPC. + + Deletes a TensorboardExperiment. + + Returns: + Callable[[~.DeleteTensorboardExperimentRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_tensorboard_experiment" not in self._stubs: + self._stubs[ + "delete_tensorboard_experiment" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardExperiment", + request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_tensorboard_experiment"] + + @property + def create_tensorboard_run( + self, + ) -> Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + Awaitable[gca_tensorboard_run.TensorboardRun], + ]: + r"""Return a callable for the create tensorboard run method over gRPC. + + Creates a TensorboardRun. + + Returns: + Callable[[~.CreateTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_tensorboard_run" not in self._stubs: + self._stubs["create_tensorboard_run"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardRun", + request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs["create_tensorboard_run"] + + @property + def get_tensorboard_run( + self, + ) -> Callable[ + [tensorboard_service.GetTensorboardRunRequest], + Awaitable[tensorboard_run.TensorboardRun], + ]: + r"""Return a callable for the get tensorboard run method over gRPC. + + Gets a TensorboardRun. + + Returns: + Callable[[~.GetTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_tensorboard_run" not in self._stubs: + self._stubs["get_tensorboard_run"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardRun", + request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, + response_deserializer=tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs["get_tensorboard_run"] + + @property + def update_tensorboard_run( + self, + ) -> Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + Awaitable[gca_tensorboard_run.TensorboardRun], + ]: + r"""Return a callable for the update tensorboard run method over gRPC. + + Updates a TensorboardRun. 
+ + Returns: + Callable[[~.UpdateTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_tensorboard_run" not in self._stubs: + self._stubs["update_tensorboard_run"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardRun", + request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs["update_tensorboard_run"] + + @property + def list_tensorboard_runs( + self, + ) -> Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + Awaitable[tensorboard_service.ListTensorboardRunsResponse], + ]: + r"""Return a callable for the list tensorboard runs method over gRPC. + + Lists TensorboardRuns in a Location. + + Returns: + Callable[[~.ListTensorboardRunsRequest], + Awaitable[~.ListTensorboardRunsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_tensorboard_runs" not in self._stubs: + self._stubs["list_tensorboard_runs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardRuns", + request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, + ) + return self._stubs["list_tensorboard_runs"] + + @property + def delete_tensorboard_run( + self, + ) -> Callable[ + [tensorboard_service.DeleteTensorboardRunRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the delete tensorboard run method over gRPC. + + Deletes a TensorboardRun. + + Returns: + Callable[[~.DeleteTensorboardRunRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_tensorboard_run" not in self._stubs: + self._stubs["delete_tensorboard_run"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardRun", + request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_tensorboard_run"] + + @property + def create_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.CreateTensorboardTimeSeriesRequest], + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries], + ]: + r"""Return a callable for the create tensorboard time series method over gRPC. + + Creates a TensorboardTimeSeries. + + Returns: + Callable[[~.CreateTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_tensorboard_time_series" not in self._stubs: + self._stubs[ + "create_tensorboard_time_series" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardTimeSeries", + request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs["create_tensorboard_time_series"] + + @property + def get_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.GetTensorboardTimeSeriesRequest], + Awaitable[tensorboard_time_series.TensorboardTimeSeries], + ]: + r"""Return a callable for the get tensorboard time series method over gRPC. + + Gets a TensorboardTimeSeries. + + Returns: + Callable[[~.GetTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_tensorboard_time_series" not in self._stubs: + self._stubs["get_tensorboard_time_series"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardTimeSeries", + request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs["get_tensorboard_time_series"] + + @property + def update_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries], + ]: + r"""Return a callable for the update tensorboard time series method over gRPC. + + Updates a TensorboardTimeSeries. + + Returns: + Callable[[~.UpdateTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_tensorboard_time_series" not in self._stubs: + self._stubs[ + "update_tensorboard_time_series" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardTimeSeries", + request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs["update_tensorboard_time_series"] + + @property + def list_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.ListTensorboardTimeSeriesRequest], + Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse], + ]: + r"""Return a callable for the list tensorboard time series method over gRPC. + + Lists TensorboardTimeSeries in a Location. 
+ + Returns: + Callable[[~.ListTensorboardTimeSeriesRequest], + Awaitable[~.ListTensorboardTimeSeriesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_tensorboard_time_series" not in self._stubs: + self._stubs["list_tensorboard_time_series"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardTimeSeries", + request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs["list_tensorboard_time_series"] + + @property + def delete_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.DeleteTensorboardTimeSeriesRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the delete tensorboard time series method over gRPC. + + Deletes a TensorboardTimeSeries. + + Returns: + Callable[[~.DeleteTensorboardTimeSeriesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_tensorboard_time_series" not in self._stubs: + self._stubs[ + "delete_tensorboard_time_series" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardTimeSeries", + request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_tensorboard_time_series"] + + @property + def read_tensorboard_time_series_data( + self, + ) -> Callable[ + [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], + Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse], + ]: + r"""Return a callable for the read tensorboard time series + data method over gRPC. + + Reads a TensorboardTimeSeries' data. Data is returned in + paginated responses. By default, if the number of data points + stored is less than 1000, all data will be returned. Otherwise, + 1000 data points will be randomly selected from this time series + and returned. This value can be changed by changing + max_data_points. + + Returns: + Callable[[~.ReadTensorboardTimeSeriesDataRequest], + Awaitable[~.ReadTensorboardTimeSeriesDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "read_tensorboard_time_series_data" not in self._stubs: + self._stubs[ + "read_tensorboard_time_series_data" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardTimeSeriesData", + request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs["read_tensorboard_time_series_data"] + + @property + def read_tensorboard_blob_data( + self, + ) -> Callable[ + [tensorboard_service.ReadTensorboardBlobDataRequest], + Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse], + ]: + r"""Return a callable for the read tensorboard blob data method over gRPC. + + Gets bytes of TensorboardBlobs. + This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + Returns: + Callable[[~.ReadTensorboardBlobDataRequest], + Awaitable[~.ReadTensorboardBlobDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "read_tensorboard_blob_data" not in self._stubs: + self._stubs["read_tensorboard_blob_data"] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardBlobData", + request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, + ) + return self._stubs["read_tensorboard_blob_data"] + + @property + def write_tensorboard_run_data( + self, + ) -> Callable[ + [tensorboard_service.WriteTensorboardRunDataRequest], + Awaitable[tensorboard_service.WriteTensorboardRunDataResponse], + ]: + r"""Return a callable for the write tensorboard run data method over gRPC. + + Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardRunDataRequest], + Awaitable[~.WriteTensorboardRunDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "write_tensorboard_run_data" not in self._stubs: + self._stubs["write_tensorboard_run_data"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardRunData", + request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, + ) + return self._stubs["write_tensorboard_run_data"] + + @property + def export_tensorboard_time_series_data( + self, + ) -> Callable[ + [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], + Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse], + ]: + r"""Return a callable for the export tensorboard time series + data method over gRPC. + + Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + Returns: + Callable[[~.ExportTensorboardTimeSeriesDataRequest], + Awaitable[~.ExportTensorboardTimeSeriesDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "export_tensorboard_time_series_data" not in self._stubs: + self._stubs[ + "export_tensorboard_time_series_data" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/ExportTensorboardTimeSeriesData", + request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs["export_tensorboard_time_series_data"] + + +__all__ = ("TensorboardServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py index 4bd90a79cd..6c29a31eb4 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py @@ -198,7 +198,7 @@ async def create_study( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CreateStudyRequest`): The request object. Request message for - ``VizierService.CreateStudy``. + [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. parent (:class:`str`): Required. The resource name of the Location to create the CustomJob in. Format: @@ -279,7 +279,7 @@ async def get_study( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetStudyRequest`): The request object. Request message for - ``VizierService.GetStudy``. + [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. name (:class:`str`): Required. The name of the Study resource. Format: ``projects/{project}/locations/{location}/studies/{study}`` @@ -351,7 +351,7 @@ async def list_studies( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListStudiesRequest`): The request object. Request message for - ``VizierService.ListStudies``. + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. 
parent (:class:`str`): Required. The resource name of the Location to list the Study from. Format: @@ -370,7 +370,7 @@ async def list_studies( Returns: google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesAsyncPager: Response message for - ``VizierService.ListStudies``. + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. Iterating over this object will yield results and resolve additional pages automatically. @@ -434,7 +434,7 @@ async def delete_study( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest`): The request object. Request message for - ``VizierService.DeleteStudy``. + [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. name (:class:`str`): Required. The name of the Study resource to be deleted. Format: @@ -502,7 +502,7 @@ async def lookup_study( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.LookupStudyRequest`): The request object. Request message for - ``VizierService.LookupStudy``. + [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. parent (:class:`str`): Required. The resource name of the Location to get the Study from. Format: @@ -572,12 +572,12 @@ async def suggest_trials( suggested by AI Platform Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a - ``SuggestTrialsResponse``. + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest`): The request object. Request message for - ``VizierService.SuggestTrials``. + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
@@ -592,7 +592,7 @@ async def suggest_trials( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse` Response message for - ``VizierService.SuggestTrials``. + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. """ # Create or coerce a protobuf request object. @@ -642,7 +642,7 @@ async def create_trial( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTrialRequest`): The request object. Request message for - ``VizierService.CreateTrial``. + [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. parent (:class:`str`): Required. The resource name of the Study to create the Trial in. Format: @@ -726,7 +726,7 @@ async def get_trial( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.GetTrialRequest`): The request object. Request message for - ``VizierService.GetTrial``. + [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. name (:class:`str`): Required. The name of the Trial resource. Format: ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` @@ -802,7 +802,7 @@ async def list_trials( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListTrialsRequest`): The request object. Request message for - ``VizierService.ListTrials``. + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. parent (:class:`str`): Required. The resource name of the Study to list the Trial from. Format: @@ -821,7 +821,7 @@ async def list_trials( Returns: google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsAsyncPager: Response message for - ``VizierService.ListTrials``. + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. Iterating over this object will yield results and resolve additional pages automatically. 
@@ -886,7 +886,7 @@ async def add_trial_measurement( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest`): The request object. Request message for - ``VizierService.AddTrialMeasurement``. + [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -942,7 +942,7 @@ async def complete_trial( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest`): The request object. Request message for - ``VizierService.CompleteTrial``. + [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -997,7 +997,7 @@ async def delete_trial( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest`): The request object. Request message for - ``VizierService.DeleteTrial``. + [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. name (:class:`str`): Required. The Trial's name. Format: ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` @@ -1060,12 +1060,12 @@ async def check_trial_early_stopping_state( r"""Checks whether a Trial should stop or not. Returns a long-running operation. When the operation is successful, it will contain a - ``CheckTrialEarlyStoppingStateResponse``. + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest`): The request object. Request message for - ``VizierService.CheckTrialEarlyStoppingState``. + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
@@ -1080,7 +1080,7 @@ async def check_trial_early_stopping_state( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateResponse` Response message for - ``VizierService.CheckTrialEarlyStoppingState``. + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. """ # Create or coerce a protobuf request object. @@ -1130,7 +1130,7 @@ async def stop_trial( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.StopTrialRequest`): The request object. Request message for - ``VizierService.StopTrial``. + [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -1188,7 +1188,7 @@ async def list_optimal_trials( Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest`): The request object. Request message for - ``VizierService.ListOptimalTrials``. + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. parent (:class:`str`): Required. The name of the Study that the optimal Trial belongs to. @@ -1206,7 +1206,7 @@ async def list_optimal_trials( Returns: google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse: Response message for - ``VizierService.ListOptimalTrials``. + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. """ # Create or coerce a protobuf request object. 
diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py index 85e381323d..23d7091c9c 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py @@ -398,7 +398,7 @@ def create_study( Args: request (google.cloud.aiplatform_v1beta1.types.CreateStudyRequest): The request object. Request message for - ``VizierService.CreateStudy``. + [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. parent (str): Required. The resource name of the Location to create the CustomJob in. Format: @@ -480,7 +480,7 @@ def get_study( Args: request (google.cloud.aiplatform_v1beta1.types.GetStudyRequest): The request object. Request message for - ``VizierService.GetStudy``. + [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. name (str): Required. The name of the Study resource. Format: ``projects/{project}/locations/{location}/studies/{study}`` @@ -553,7 +553,7 @@ def list_studies( Args: request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): The request object. Request message for - ``VizierService.ListStudies``. + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. parent (str): Required. The resource name of the Location to list the Study from. Format: @@ -572,7 +572,7 @@ def list_studies( Returns: google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesPager: Response message for - ``VizierService.ListStudies``. + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. Iterating over this object will yield results and resolve additional pages automatically. @@ -637,7 +637,7 @@ def delete_study( Args: request (google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest): The request object. Request message for - ``VizierService.DeleteStudy``. 
+ [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. name (str): Required. The name of the Study resource to be deleted. Format: @@ -706,7 +706,7 @@ def lookup_study( Args: request (google.cloud.aiplatform_v1beta1.types.LookupStudyRequest): The request object. Request message for - ``VizierService.LookupStudy``. + [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. parent (str): Required. The resource name of the Location to get the Study from. Format: @@ -777,12 +777,12 @@ def suggest_trials( suggested by AI Platform Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a - ``SuggestTrialsResponse``. + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. Args: request (google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest): The request object. Request message for - ``VizierService.SuggestTrials``. + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -797,7 +797,7 @@ def suggest_trials( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse` Response message for - ``VizierService.SuggestTrials``. + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. """ # Create or coerce a protobuf request object. @@ -848,7 +848,7 @@ def create_trial( Args: request (google.cloud.aiplatform_v1beta1.types.CreateTrialRequest): The request object. Request message for - ``VizierService.CreateTrial``. + [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. parent (str): Required. The resource name of the Study to create the Trial in. 
Format: @@ -933,7 +933,7 @@ def get_trial( Args: request (google.cloud.aiplatform_v1beta1.types.GetTrialRequest): The request object. Request message for - ``VizierService.GetTrial``. + [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. name (str): Required. The name of the Trial resource. Format: ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` @@ -1010,7 +1010,7 @@ def list_trials( Args: request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): The request object. Request message for - ``VizierService.ListTrials``. + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. parent (str): Required. The resource name of the Study to list the Trial from. Format: @@ -1029,7 +1029,7 @@ def list_trials( Returns: google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsPager: Response message for - ``VizierService.ListTrials``. + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. Iterating over this object will yield results and resolve additional pages automatically. @@ -1095,7 +1095,7 @@ def add_trial_measurement( Args: request (google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest): The request object. Request message for - ``VizierService.AddTrialMeasurement``. + [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -1152,7 +1152,7 @@ def complete_trial( Args: request (google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest): The request object. Request message for - ``VizierService.CompleteTrial``. + [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
@@ -1208,7 +1208,7 @@ def delete_trial( Args: request (google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest): The request object. Request message for - ``VizierService.DeleteTrial``. + [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. name (str): Required. The Trial's name. Format: ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` @@ -1272,12 +1272,12 @@ def check_trial_early_stopping_state( r"""Checks whether a Trial should stop or not. Returns a long-running operation. When the operation is successful, it will contain a - ``CheckTrialEarlyStoppingStateResponse``. + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. Args: request (google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest): The request object. Request message for - ``VizierService.CheckTrialEarlyStoppingState``. + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -1292,7 +1292,7 @@ def check_trial_early_stopping_state( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateResponse` Response message for - ``VizierService.CheckTrialEarlyStoppingState``. + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. """ # Create or coerce a protobuf request object. @@ -1345,7 +1345,7 @@ def stop_trial( Args: request (google.cloud.aiplatform_v1beta1.types.StopTrialRequest): The request object. Request message for - ``VizierService.StopTrial``. + [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
@@ -1404,7 +1404,7 @@ def list_optimal_trials( Args: request (google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest): The request object. Request message for - ``VizierService.ListOptimalTrials``. + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. parent (str): Required. The name of the Study that the optimal Trial belongs to. @@ -1422,7 +1422,7 @@ def list_optimal_trials( Returns: google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse: Response message for - ``VizierService.ListOptimalTrials``. + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py index 2fdfb4b13f..f09cd934b7 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py @@ -74,10 +74,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -85,6 +85,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. 
+ self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -94,20 +97,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py index 388d2746f5..2e569f1248 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py @@ -114,7 +114,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -122,70 +125,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -193,18 +176,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -218,7 +191,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -253,7 +226,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" + """Return the channel designed to connect to this service. + """ return self._grpc_channel @property @@ -413,7 +387,7 @@ def suggest_trials( suggested by AI Platform Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a - ``SuggestTrialsResponse``. + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. Returns: Callable[[~.SuggestTrialsRequest], @@ -603,7 +577,7 @@ def check_trial_early_stopping_state( Checks whether a Trial should stop or not. Returns a long-running operation. When the operation is successful, it will contain a - ``CheckTrialEarlyStoppingStateResponse``. + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. 
Returns: Callable[[~.CheckTrialEarlyStoppingStateRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py index 82e28342a4..64bcc08c34 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py @@ -69,7 +69,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -147,10 +147,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -159,7 +159,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -167,70 +170,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -238,18 +221,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: @@ -425,7 +398,7 @@ def suggest_trials( suggested by AI Platform Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a - ``SuggestTrialsResponse``. + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. Returns: Callable[[~.SuggestTrialsRequest], @@ -618,7 +591,7 @@ def check_trial_early_stopping_state( Checks whether a Trial should stop or not. Returns a long-running operation. When the operation is successful, it will contain a - ``CheckTrialEarlyStoppingStateResponse``. + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. Returns: Callable[[~.CheckTrialEarlyStoppingStateRequest], diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 2d2368df8c..0b02ac1777 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -17,8 +17,10 @@ from .annotation import Annotation from .annotation_spec import AnnotationSpec +from .artifact import Artifact from .batch_prediction_job import BatchPredictionJob from .completion_stats import CompletionStats +from .context import Context from .custom_job import ( ContainerSpec, CustomJob, @@ -59,6 +61,7 @@ ListDatasetsResponse, UpdateDatasetRequest, ) +from .deployed_index_ref import DeployedIndexRef from .deployed_model_ref import DeployedModelRef from .encryption_spec import EncryptionSpec from .endpoint import ( @@ -80,7 +83,10 @@ UndeployModelResponse, UpdateEndpointRequest, ) +from .entity_type import EntityType from .env_var import EnvVar +from .event import Event +from .execution import Execution from .explanation import ( Attribution, Explanation, @@ -96,13 +102,105 @@ XraiAttribution, ) from 
.explanation_metadata import ExplanationMetadata +from .feature import Feature +from .feature_monitoring_stats import FeatureStatsAnomaly +from .feature_selector import ( + FeatureSelector, + IdMatcher, +) +from .featurestore import Featurestore +from .featurestore_monitoring import FeaturestoreMonitoringConfig +from .featurestore_online_service import ( + FeatureValue, + FeatureValueList, + ReadFeatureValuesRequest, + ReadFeatureValuesResponse, + StreamingReadFeatureValuesRequest, +) +from .featurestore_service import ( + BatchCreateFeaturesOperationMetadata, + BatchCreateFeaturesRequest, + BatchCreateFeaturesResponse, + BatchReadFeatureValuesOperationMetadata, + BatchReadFeatureValuesRequest, + BatchReadFeatureValuesResponse, + CreateEntityTypeOperationMetadata, + CreateEntityTypeRequest, + CreateFeatureOperationMetadata, + CreateFeatureRequest, + CreateFeaturestoreOperationMetadata, + CreateFeaturestoreRequest, + DeleteEntityTypeRequest, + DeleteFeatureRequest, + DeleteFeaturestoreRequest, + DestinationFeatureSetting, + ExportFeatureValuesOperationMetadata, + ExportFeatureValuesRequest, + ExportFeatureValuesResponse, + FeatureValueDestination, + GetEntityTypeRequest, + GetFeatureRequest, + GetFeaturestoreRequest, + ImportFeatureValuesOperationMetadata, + ImportFeatureValuesRequest, + ImportFeatureValuesResponse, + ListEntityTypesRequest, + ListEntityTypesResponse, + ListFeaturesRequest, + ListFeaturesResponse, + ListFeaturestoresRequest, + ListFeaturestoresResponse, + SearchFeaturesRequest, + SearchFeaturesResponse, + UpdateEntityTypeRequest, + UpdateFeatureRequest, + UpdateFeaturestoreOperationMetadata, + UpdateFeaturestoreRequest, +) from .hyperparameter_tuning_job import HyperparameterTuningJob +from .index import Index +from .index_endpoint import ( + DeployedIndex, + DeployedIndexAuthConfig, + IndexEndpoint, + IndexPrivateEndpoints, +) +from .index_endpoint_service import ( + CreateIndexEndpointOperationMetadata, + CreateIndexEndpointRequest, + 
DeleteIndexEndpointRequest, + DeployIndexOperationMetadata, + DeployIndexRequest, + DeployIndexResponse, + GetIndexEndpointRequest, + ListIndexEndpointsRequest, + ListIndexEndpointsResponse, + UndeployIndexOperationMetadata, + UndeployIndexRequest, + UndeployIndexResponse, + UpdateIndexEndpointRequest, +) +from .index_service import ( + CreateIndexOperationMetadata, + CreateIndexRequest, + DeleteIndexRequest, + GetIndexRequest, + ListIndexesRequest, + ListIndexesResponse, + NearestNeighborSearchOperationMetadata, + UpdateIndexOperationMetadata, + UpdateIndexRequest, +) from .io import ( + AvroSource, BigQueryDestination, BigQuerySource, ContainerRegistryDestination, + CsvDestination, + CsvSource, GcsDestination, GcsSource, + TFRecordDestination, ) from .job_service import ( CancelBatchPredictionJobRequest, @@ -113,14 +211,17 @@ CreateCustomJobRequest, CreateDataLabelingJobRequest, CreateHyperparameterTuningJobRequest, + CreateModelDeploymentMonitoringJobRequest, DeleteBatchPredictionJobRequest, DeleteCustomJobRequest, DeleteDataLabelingJobRequest, DeleteHyperparameterTuningJobRequest, + DeleteModelDeploymentMonitoringJobRequest, GetBatchPredictionJobRequest, GetCustomJobRequest, GetDataLabelingJobRequest, GetHyperparameterTuningJobRequest, + GetModelDeploymentMonitoringJobRequest, ListBatchPredictionJobsRequest, ListBatchPredictionJobsResponse, ListCustomJobsRequest, @@ -129,7 +230,16 @@ ListDataLabelingJobsResponse, ListHyperparameterTuningJobsRequest, ListHyperparameterTuningJobsResponse, + ListModelDeploymentMonitoringJobsRequest, + ListModelDeploymentMonitoringJobsResponse, + PauseModelDeploymentMonitoringJobRequest, + ResumeModelDeploymentMonitoringJobRequest, + SearchModelDeploymentMonitoringStatsAnomaliesRequest, + SearchModelDeploymentMonitoringStatsAnomaliesResponse, + UpdateModelDeploymentMonitoringJobOperationMetadata, + UpdateModelDeploymentMonitoringJobRequest, ) +from .lineage_subgraph import LineageSubgraph from .machine_resources import ( 
AutomaticResources, AutoscalingMetricSpec, @@ -140,6 +250,46 @@ ResourcesConsumed, ) from .manual_batch_tuning_parameters import ManualBatchTuningParameters +from .metadata_schema import MetadataSchema +from .metadata_service import ( + AddContextArtifactsAndExecutionsRequest, + AddContextArtifactsAndExecutionsResponse, + AddContextChildrenRequest, + AddContextChildrenResponse, + AddExecutionEventsRequest, + AddExecutionEventsResponse, + CreateArtifactRequest, + CreateContextRequest, + CreateExecutionRequest, + CreateMetadataSchemaRequest, + CreateMetadataStoreOperationMetadata, + CreateMetadataStoreRequest, + DeleteContextRequest, + DeleteMetadataStoreOperationMetadata, + DeleteMetadataStoreRequest, + GetArtifactRequest, + GetContextRequest, + GetExecutionRequest, + GetMetadataSchemaRequest, + GetMetadataStoreRequest, + ListArtifactsRequest, + ListArtifactsResponse, + ListContextsRequest, + ListContextsResponse, + ListExecutionsRequest, + ListExecutionsResponse, + ListMetadataSchemasRequest, + ListMetadataSchemasResponse, + ListMetadataStoresRequest, + ListMetadataStoresResponse, + QueryArtifactLineageSubgraphRequest, + QueryContextLineageSubgraphRequest, + QueryExecutionInputsAndOutputsRequest, + UpdateArtifactRequest, + UpdateContextRequest, + UpdateExecutionRequest, +) +from .metadata_store import MetadataStore from .migratable_resource import MigratableResource from .migration_service import ( BatchMigrateResourcesOperationMetadata, @@ -156,8 +306,22 @@ Port, PredictSchemata, ) +from .model_deployment_monitoring_job import ( + ModelDeploymentMonitoringBigQueryTable, + ModelDeploymentMonitoringJob, + ModelDeploymentMonitoringObjectiveConfig, + ModelDeploymentMonitoringScheduleConfig, + ModelMonitoringStatsAnomalies, + ModelDeploymentMonitoringObjectiveType, +) from .model_evaluation import ModelEvaluation from .model_evaluation_slice import ModelEvaluationSlice +from .model_monitoring import ( + ModelMonitoringAlertConfig, + ModelMonitoringObjectiveConfig, + 
SamplingStrategy, + ThresholdConfig, +) from .model_service import ( DeleteModelRequest, ExportModelOperationMetadata, @@ -181,11 +345,23 @@ DeleteOperationMetadata, GenericOperationMetadata, ) +from .pipeline_job import ( + PipelineJob, + PipelineJobDetail, + PipelineTaskDetail, + PipelineTaskExecutorDetail, +) from .pipeline_service import ( + CancelPipelineJobRequest, CancelTrainingPipelineRequest, + CreatePipelineJobRequest, CreateTrainingPipelineRequest, + DeletePipelineJobRequest, DeleteTrainingPipelineRequest, + GetPipelineJobRequest, GetTrainingPipelineRequest, + ListPipelineJobsRequest, + ListPipelineJobsResponse, ListTrainingPipelinesRequest, ListTrainingPipelinesResponse, ) @@ -212,6 +388,54 @@ StudySpec, Trial, ) +from .tensorboard import Tensorboard +from .tensorboard_data import ( + Scalar, + TensorboardBlob, + TensorboardBlobSequence, + TensorboardTensor, + TimeSeriesData, + TimeSeriesDataPoint, +) +from .tensorboard_experiment import TensorboardExperiment +from .tensorboard_run import TensorboardRun +from .tensorboard_service import ( + CreateTensorboardExperimentRequest, + CreateTensorboardOperationMetadata, + CreateTensorboardRequest, + CreateTensorboardRunRequest, + CreateTensorboardTimeSeriesRequest, + DeleteTensorboardExperimentRequest, + DeleteTensorboardRequest, + DeleteTensorboardRunRequest, + DeleteTensorboardTimeSeriesRequest, + ExportTensorboardTimeSeriesDataRequest, + ExportTensorboardTimeSeriesDataResponse, + GetTensorboardExperimentRequest, + GetTensorboardRequest, + GetTensorboardRunRequest, + GetTensorboardTimeSeriesRequest, + ListTensorboardExperimentsRequest, + ListTensorboardExperimentsResponse, + ListTensorboardRunsRequest, + ListTensorboardRunsResponse, + ListTensorboardsRequest, + ListTensorboardsResponse, + ListTensorboardTimeSeriesRequest, + ListTensorboardTimeSeriesResponse, + ReadTensorboardBlobDataRequest, + ReadTensorboardBlobDataResponse, + ReadTensorboardTimeSeriesDataRequest, + ReadTensorboardTimeSeriesDataResponse, + 
UpdateTensorboardExperimentRequest, + UpdateTensorboardOperationMetadata, + UpdateTensorboardRequest, + UpdateTensorboardRunRequest, + UpdateTensorboardTimeSeriesRequest, + WriteTensorboardRunDataRequest, + WriteTensorboardRunDataResponse, +) +from .tensorboard_time_series import TensorboardTimeSeries from .training_pipeline import ( FilterSplit, FractionSplit, @@ -220,7 +444,14 @@ TimestampSplit, TrainingPipeline, ) +from .types import ( + BoolArray, + DoubleArray, + Int64Array, + StringArray, +) from .user_action_reference import UserActionReference +from .value import Value from .vizier_service import ( AddTrialMeasurementRequest, CheckTrialEarlyStoppingStateMetatdata, @@ -250,8 +481,10 @@ "AcceleratorType", "Annotation", "AnnotationSpec", + "Artifact", "BatchPredictionJob", "CompletionStats", + "Context", "ContainerSpec", "CustomJob", "CustomJobSpec", @@ -284,6 +517,7 @@ "ListDatasetsRequest", "ListDatasetsResponse", "UpdateDatasetRequest", + "DeployedIndexRef", "DeployedModelRef", "EncryptionSpec", "DeployedModel", @@ -301,7 +535,10 @@ "UndeployModelRequest", "UndeployModelResponse", "UpdateEndpointRequest", + "EntityType", "EnvVar", + "Event", + "Execution", "Attribution", "Explanation", "ExplanationMetadataOverride", @@ -315,12 +552,92 @@ "SmoothGradConfig", "XraiAttribution", "ExplanationMetadata", + "Feature", + "FeatureStatsAnomaly", + "FeatureSelector", + "IdMatcher", + "Featurestore", + "FeaturestoreMonitoringConfig", + "FeatureValue", + "FeatureValueList", + "ReadFeatureValuesRequest", + "ReadFeatureValuesResponse", + "StreamingReadFeatureValuesRequest", + "BatchCreateFeaturesOperationMetadata", + "BatchCreateFeaturesRequest", + "BatchCreateFeaturesResponse", + "BatchReadFeatureValuesOperationMetadata", + "BatchReadFeatureValuesRequest", + "BatchReadFeatureValuesResponse", + "CreateEntityTypeOperationMetadata", + "CreateEntityTypeRequest", + "CreateFeatureOperationMetadata", + "CreateFeatureRequest", + "CreateFeaturestoreOperationMetadata", + 
"CreateFeaturestoreRequest", + "DeleteEntityTypeRequest", + "DeleteFeatureRequest", + "DeleteFeaturestoreRequest", + "DestinationFeatureSetting", + "ExportFeatureValuesOperationMetadata", + "ExportFeatureValuesRequest", + "ExportFeatureValuesResponse", + "FeatureValueDestination", + "GetEntityTypeRequest", + "GetFeatureRequest", + "GetFeaturestoreRequest", + "ImportFeatureValuesOperationMetadata", + "ImportFeatureValuesRequest", + "ImportFeatureValuesResponse", + "ListEntityTypesRequest", + "ListEntityTypesResponse", + "ListFeaturesRequest", + "ListFeaturesResponse", + "ListFeaturestoresRequest", + "ListFeaturestoresResponse", + "SearchFeaturesRequest", + "SearchFeaturesResponse", + "UpdateEntityTypeRequest", + "UpdateFeatureRequest", + "UpdateFeaturestoreOperationMetadata", + "UpdateFeaturestoreRequest", "HyperparameterTuningJob", + "Index", + "DeployedIndex", + "DeployedIndexAuthConfig", + "IndexEndpoint", + "IndexPrivateEndpoints", + "CreateIndexEndpointOperationMetadata", + "CreateIndexEndpointRequest", + "DeleteIndexEndpointRequest", + "DeployIndexOperationMetadata", + "DeployIndexRequest", + "DeployIndexResponse", + "GetIndexEndpointRequest", + "ListIndexEndpointsRequest", + "ListIndexEndpointsResponse", + "UndeployIndexOperationMetadata", + "UndeployIndexRequest", + "UndeployIndexResponse", + "UpdateIndexEndpointRequest", + "CreateIndexOperationMetadata", + "CreateIndexRequest", + "DeleteIndexRequest", + "GetIndexRequest", + "ListIndexesRequest", + "ListIndexesResponse", + "NearestNeighborSearchOperationMetadata", + "UpdateIndexOperationMetadata", + "UpdateIndexRequest", + "AvroSource", "BigQueryDestination", "BigQuerySource", "ContainerRegistryDestination", + "CsvDestination", + "CsvSource", "GcsDestination", "GcsSource", + "TFRecordDestination", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", "CancelDataLabelingJobRequest", @@ -329,14 +646,17 @@ "CreateCustomJobRequest", "CreateDataLabelingJobRequest", "CreateHyperparameterTuningJobRequest", + 
"CreateModelDeploymentMonitoringJobRequest", "DeleteBatchPredictionJobRequest", "DeleteCustomJobRequest", "DeleteDataLabelingJobRequest", "DeleteHyperparameterTuningJobRequest", + "DeleteModelDeploymentMonitoringJobRequest", "GetBatchPredictionJobRequest", "GetCustomJobRequest", "GetDataLabelingJobRequest", "GetHyperparameterTuningJobRequest", + "GetModelDeploymentMonitoringJobRequest", "ListBatchPredictionJobsRequest", "ListBatchPredictionJobsResponse", "ListCustomJobsRequest", @@ -345,7 +665,16 @@ "ListDataLabelingJobsResponse", "ListHyperparameterTuningJobsRequest", "ListHyperparameterTuningJobsResponse", + "ListModelDeploymentMonitoringJobsRequest", + "ListModelDeploymentMonitoringJobsResponse", + "PauseModelDeploymentMonitoringJobRequest", + "ResumeModelDeploymentMonitoringJobRequest", + "SearchModelDeploymentMonitoringStatsAnomaliesRequest", + "SearchModelDeploymentMonitoringStatsAnomaliesResponse", + "UpdateModelDeploymentMonitoringJobOperationMetadata", + "UpdateModelDeploymentMonitoringJobRequest", "JobState", + "LineageSubgraph", "AutomaticResources", "AutoscalingMetricSpec", "BatchDedicatedResources", @@ -354,6 +683,44 @@ "MachineSpec", "ResourcesConsumed", "ManualBatchTuningParameters", + "MetadataSchema", + "AddContextArtifactsAndExecutionsRequest", + "AddContextArtifactsAndExecutionsResponse", + "AddContextChildrenRequest", + "AddContextChildrenResponse", + "AddExecutionEventsRequest", + "AddExecutionEventsResponse", + "CreateArtifactRequest", + "CreateContextRequest", + "CreateExecutionRequest", + "CreateMetadataSchemaRequest", + "CreateMetadataStoreOperationMetadata", + "CreateMetadataStoreRequest", + "DeleteContextRequest", + "DeleteMetadataStoreOperationMetadata", + "DeleteMetadataStoreRequest", + "GetArtifactRequest", + "GetContextRequest", + "GetExecutionRequest", + "GetMetadataSchemaRequest", + "GetMetadataStoreRequest", + "ListArtifactsRequest", + "ListArtifactsResponse", + "ListContextsRequest", + "ListContextsResponse", + 
"ListExecutionsRequest", + "ListExecutionsResponse", + "ListMetadataSchemasRequest", + "ListMetadataSchemasResponse", + "ListMetadataStoresRequest", + "ListMetadataStoresResponse", + "QueryArtifactLineageSubgraphRequest", + "QueryContextLineageSubgraphRequest", + "QueryExecutionInputsAndOutputsRequest", + "UpdateArtifactRequest", + "UpdateContextRequest", + "UpdateExecutionRequest", + "MetadataStore", "MigratableResource", "BatchMigrateResourcesOperationMetadata", "BatchMigrateResourcesRequest", @@ -366,8 +733,18 @@ "ModelContainerSpec", "Port", "PredictSchemata", + "ModelDeploymentMonitoringBigQueryTable", + "ModelDeploymentMonitoringJob", + "ModelDeploymentMonitoringObjectiveConfig", + "ModelDeploymentMonitoringScheduleConfig", + "ModelMonitoringStatsAnomalies", + "ModelDeploymentMonitoringObjectiveType", "ModelEvaluation", "ModelEvaluationSlice", + "ModelMonitoringAlertConfig", + "ModelMonitoringObjectiveConfig", + "SamplingStrategy", + "ThresholdConfig", "DeleteModelRequest", "ExportModelOperationMetadata", "ExportModelRequest", @@ -387,10 +764,20 @@ "UploadModelResponse", "DeleteOperationMetadata", "GenericOperationMetadata", + "PipelineJob", + "PipelineJobDetail", + "PipelineTaskDetail", + "PipelineTaskExecutorDetail", + "CancelPipelineJobRequest", "CancelTrainingPipelineRequest", + "CreatePipelineJobRequest", "CreateTrainingPipelineRequest", + "DeletePipelineJobRequest", "DeleteTrainingPipelineRequest", + "GetPipelineJobRequest", "GetTrainingPipelineRequest", + "ListPipelineJobsRequest", + "ListPipelineJobsResponse", "ListTrainingPipelinesRequest", "ListTrainingPipelinesResponse", "PipelineState", @@ -411,13 +798,62 @@ "Study", "StudySpec", "Trial", + "Tensorboard", + "Scalar", + "TensorboardBlob", + "TensorboardBlobSequence", + "TensorboardTensor", + "TimeSeriesData", + "TimeSeriesDataPoint", + "TensorboardExperiment", + "TensorboardRun", + "CreateTensorboardExperimentRequest", + "CreateTensorboardOperationMetadata", + "CreateTensorboardRequest", + 
"CreateTensorboardRunRequest", + "CreateTensorboardTimeSeriesRequest", + "DeleteTensorboardExperimentRequest", + "DeleteTensorboardRequest", + "DeleteTensorboardRunRequest", + "DeleteTensorboardTimeSeriesRequest", + "ExportTensorboardTimeSeriesDataRequest", + "ExportTensorboardTimeSeriesDataResponse", + "GetTensorboardExperimentRequest", + "GetTensorboardRequest", + "GetTensorboardRunRequest", + "GetTensorboardTimeSeriesRequest", + "ListTensorboardExperimentsRequest", + "ListTensorboardExperimentsResponse", + "ListTensorboardRunsRequest", + "ListTensorboardRunsResponse", + "ListTensorboardsRequest", + "ListTensorboardsResponse", + "ListTensorboardTimeSeriesRequest", + "ListTensorboardTimeSeriesResponse", + "ReadTensorboardBlobDataRequest", + "ReadTensorboardBlobDataResponse", + "ReadTensorboardTimeSeriesDataRequest", + "ReadTensorboardTimeSeriesDataResponse", + "UpdateTensorboardExperimentRequest", + "UpdateTensorboardOperationMetadata", + "UpdateTensorboardRequest", + "UpdateTensorboardRunRequest", + "UpdateTensorboardTimeSeriesRequest", + "WriteTensorboardRunDataRequest", + "WriteTensorboardRunDataResponse", + "TensorboardTimeSeries", "FilterSplit", "FractionSplit", "InputDataConfig", "PredefinedSplit", "TimestampSplit", "TrainingPipeline", + "BoolArray", + "DoubleArray", + "Int64Array", + "StringArray", "UserActionReference", + "Value", "AddTrialMeasurementRequest", "CheckTrialEarlyStoppingStateMetatdata", "CheckTrialEarlyStoppingStateRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/annotation.py b/google/cloud/aiplatform_v1beta1/types/annotation.py index a42ef0da82..3af3aa73eb 100644 --- a/google/cloud/aiplatform_v1beta1/types/annotation.py +++ b/google/cloud/aiplatform_v1beta1/types/annotation.py @@ -38,17 +38,17 @@ class Annotation(proto.Message): payload_schema_uri (str): Required. Google Cloud Storage URI points to a YAML file describing - ``payload``. + [payload][google.cloud.aiplatform.v1beta1.Annotation.payload]. 
The schema is defined as an `OpenAPI 3.0.2 Schema Object `__. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's - ``metadata``. + [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]. payload (google.protobuf.struct_pb2.Value): Required. The schema of the payload can be found in - ``payload_schema``. + [payload_schema][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this Annotation was created. diff --git a/google/cloud/aiplatform_v1beta1/types/artifact.py b/google/cloud/aiplatform_v1beta1/types/artifact.py new file mode 100644 index 0000000000..1246ac443b --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/artifact.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"Artifact",}, +) + + +class Artifact(proto.Message): + r"""Instance of a general artifact. + + Attributes: + name (str): + Output only. The resource name of the + Artifact. 
+ display_name (str): + User provided display name of the Artifact. + May be up to 128 Unicode characters. + uri (str): + The uniform resource identifier of the + artifact file. May be empty if there is no + actual artifact file. + etag (str): + An eTag used to perform consistent read- + odify-write updates. If not set, a blind + "overwrite" update happens. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact.LabelsEntry]): + The labels with user-defined metadata to + organize your Artifacts. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Artifact (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Artifact was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Artifact was + last updated. + state (google.cloud.aiplatform_v1beta1.types.Artifact.State): + The state of this Artifact. This is a + property of the Artifact, and does not imply or + capture any ongoing process. This property is + managed by clients (such as AI Platform + Pipelines), and the system does not prescribe or + check the validity of state transitions. + schema_title (str): + The title of the schema describing the + metadata. + Schema title and version is expected to be + registered in earlier Create Schema calls. And + both are used together as unique identifiers to + identify schemas within the local metadata + store. + schema_version (str): + The version of the schema in schema_name to use. + + Schema title and version is expected to be registered in + earlier Create Schema calls. And both are used together as + unique identifiers to identify schemas within the local + metadata store. 
+ metadata (google.protobuf.struct_pb2.Struct): + Properties of the Artifact. + description (str): + Description of the Artifact + """ + + class State(proto.Enum): + r"""Describes the state of the Artifact.""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + LIVE = 2 + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + uri = proto.Field(proto.STRING, number=6) + + etag = proto.Field(proto.STRING, number=9) + + labels = proto.MapField(proto.STRING, proto.STRING, number=10) + + create_time = proto.Field(proto.MESSAGE, number=11, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=12, message=timestamp.Timestamp,) + + state = proto.Field(proto.ENUM, number=13, enum=State,) + + schema_title = proto.Field(proto.STRING, number=14) + + schema_version = proto.Field(proto.STRING, number=15) + + metadata = proto.Field(proto.MESSAGE, number=16, message=struct.Struct,) + + description = proto.Field(proto.STRING, number=17) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index 9c79349b9e..8d85090929 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -41,7 +41,7 @@ class BatchPredictionJob(proto.Message): r"""A job that uses a - ``Model`` to + [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions on multiple [input instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If predictions for significant portion of the instances fail, the @@ -67,33 +67,33 @@ class BatchPredictionJob(proto.Message): may be specified via the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``instance_schema_uri``. 
+ [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. model_parameters (google.protobuf.struct_pb2.Value): The parameters that govern the predictions. The schema of the parameters may be specified via the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. output_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputConfig): Required. The Configuration specifying where output predictions should be written. The schema of any single prediction may be specified as a concatenation of [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``instance_schema_uri`` + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] and - ``prediction_schema_uri``. + [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. dedicated_resources (google.cloud.aiplatform_v1beta1.types.BatchDedicatedResources): The config of resources used by the Model during the batch prediction. If the Model - ``supports`` + [supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types] DEDICATED_RESOURCES this config may be provided (and the job will use these resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. manual_batch_tuning_parameters (google.cloud.aiplatform_v1beta1.types.ManualBatchTuningParameters): Immutable. Parameters configuring the batch behavior. Currently only applicable when - ``dedicated_resources`` + [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources] are used (in other cases AI Platform does the tuning itself). 
generate_explanation (bool): @@ -101,41 +101,41 @@ class BatchPredictionJob(proto.Message): When set to ``true``, the batch prediction output changes based on the ``predictions_format`` field of the - ``BatchPredictionJob.output_config`` + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config] object: - ``bigquery``: output includes a column named ``explanation``. The value is a struct that conforms to the - ``Explanation`` + [Explanation][google.cloud.aiplatform.v1beta1.Explanation] object. - ``jsonl``: The JSON objects on each line include an additional entry keyed ``explanation``. The value of the entry is a JSON object that conforms to the - ``Explanation`` + [Explanation][google.cloud.aiplatform.v1beta1.Explanation] object. - ``csv``: Generating explanations for CSV format is not supported. If this field is set to true, either the - ``Model.explanation_spec`` + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] or - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] must be populated. explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): Explanation configuration for this BatchPredictionJob. Can be specified only if - ``generate_explanation`` + [generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation] is set to ``true``. This value overrides the value of - ``Model.explanation_spec``. + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. All fields of - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] are optional in the request. 
If a field of the - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] object is not populated, the corresponding field of the - ``Model.explanation_spec`` + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] object is inherited. output_info (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputInfo): Output only. Information further describing @@ -194,9 +194,9 @@ class BatchPredictionJob(proto.Message): class InputConfig(proto.Message): r"""Configures the input to - ``BatchPredictionJob``. + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. See - ``Model.supported_input_storage_formats`` + [Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] for Model's supported input formats, and how instances should be expressed via any of them. @@ -215,7 +215,7 @@ class InputConfig(proto.Message): Required. The format in which instances are given, must be one of the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - ``supported_input_storage_formats``. + [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats]. """ gcs_source = proto.Field( @@ -230,9 +230,9 @@ class InputConfig(proto.Message): class OutputConfig(proto.Message): r"""Configures the output of - ``BatchPredictionJob``. + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. See - ``Model.supported_output_storage_formats`` + [Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats] for supported output formats, and how predictions are expressed via any of them. 
@@ -247,15 +247,15 @@ class OutputConfig(proto.Message): ``predictions_0002.``, ..., ``predictions_N.`` are created where ```` depends on chosen - ``predictions_format``, + [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format], and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both - ``instance`` + [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] and - ``prediction`` + [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] schemata defined then each such file contains predictions as per the - ``predictions_format``. + [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format]. If prediction for any instance failed (partially or completely), then an additional ``errors_0001.``, ``errors_0002.``,..., ``errors_N.`` @@ -265,18 +265,18 @@ class OutputConfig(proto.Message): which as value has ```google.rpc.Status`` `__ containing only ``code`` and ``message`` fields. bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): - The BigQuery project location where the output is to be - written to. In the given project a new dataset is created - with name + The BigQuery project or dataset location where the output is + to be written to. If project is provided, a new dataset is + created with name ``prediction__`` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, ``predictions``, and ``errors``. 
If the Model has both - ``instance`` + [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] and - ``prediction`` + [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] schemata defined then the tables have columns as follows: The ``predictions`` table contains instances for which the prediction succeeded, it has columns as per a concatenation @@ -290,7 +290,7 @@ class OutputConfig(proto.Message): Required. The format in which AI Platform gives the predictions, must be one of the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - ``supported_output_storage_formats``. + [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. """ gcs_destination = proto.Field( @@ -308,7 +308,7 @@ class OutputConfig(proto.Message): class OutputInfo(proto.Message): r"""Further describes this job's output. Supplements - ``output_config``. + [output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. Attributes: gcs_output_directory (str): diff --git a/google/cloud/aiplatform_v1beta1/types/context.py b/google/cloud/aiplatform_v1beta1/types/context.py new file mode 100644 index 0000000000..5adaf07f3c --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/context.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"Context",}, +) + + +class Context(proto.Message): + r"""Instance of a general context. + + Attributes: + name (str): + Output only. The resource name of the + Context. + display_name (str): + User provided display name of the Context. + May be up to 128 Unicode characters. + etag (str): + An eTag used to perform consistent read- + odify-write updates. If not set, a blind + "overwrite" update happens. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.Context.LabelsEntry]): + The labels with user-defined metadata to + organize your Contexts. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Context (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Context was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Context was + last updated. + parent_contexts (Sequence[str]): + Output only. A list of resource names of Contexts that are + parents of this Context. A Context may have at most 10 + parent_contexts. + schema_title (str): + The title of the schema describing the + metadata. + Schema title and version is expected to be + registered in earlier Create Schema calls. And + both are used together as unique identifiers to + identify schemas within the local metadata + store. + schema_version (str): + The version of the schema in schema_name to use. + + Schema title and version is expected to be registered in + earlier Create Schema calls. 
And both are used together as + unique identifiers to identify schemas within the local + metadata store. + metadata (google.protobuf.struct_pb2.Struct): + Properties of the Context. + description (str): + Description of the Context + """ + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + etag = proto.Field(proto.STRING, number=8) + + labels = proto.MapField(proto.STRING, proto.STRING, number=9) + + create_time = proto.Field(proto.MESSAGE, number=10, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=11, message=timestamp.Timestamp,) + + parent_contexts = proto.RepeatedField(proto.STRING, number=12) + + schema_title = proto.Field(proto.STRING, number=13) + + schema_version = proto.Field(proto.STRING, number=14) + + metadata = proto.Field(proto.MESSAGE, number=15, message=struct.Struct,) + + description = proto.Field(proto.STRING, number=16) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index 1d148b7777..aa7fe5aa77 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -152,7 +152,7 @@ class CustomJobSpec(proto.Message): CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of - name ``id`` under + name [id][google.cloud.aiplatform.v1beta1.Trial.id] under its parent HyperparameterTuningJob's baseOutputDirectory. The following AI Platform environment variables will be @@ -175,6 +175,12 @@ class CustomJobSpec(proto.Message): ``//checkpoints/`` - AIP_TENSORBOARD_LOG_DIR = ``//logs/`` + tensorboard (str): + Optional. The name of an AI Platform + [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] + resource to which this CustomJob will upload Tensorboard + logs. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` """ worker_pool_specs = proto.RepeatedField( @@ -191,6 +197,8 @@ class CustomJobSpec(proto.Message): proto.MESSAGE, number=6, message=io.GcsDestination, ) + tensorboard = proto.Field(proto.STRING, number=7) + class WorkerPoolSpec(proto.Message): r"""Represents the spec of a worker pool in a job. diff --git a/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py b/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py index d750f53e66..08b63ca73e 100644 --- a/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py +++ b/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py @@ -120,7 +120,7 @@ class DataLabelingJob(proto.Message): - "aiplatform.googleapis.com/schema": output only, its value is the - ``inputs_schema``'s + [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s title. specialist_pools (Sequence[str]): The SpecialistPools' resource names diff --git a/google/cloud/aiplatform_v1beta1/types/dataset.py b/google/cloud/aiplatform_v1beta1/types/dataset.py index 9fa17fcb3a..492889a6f5 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -131,7 +131,7 @@ class ImportDataConfig(proto.Message): if their content bytes are identical (e.g. image bytes or pdf bytes). These labels will be overridden by Annotation labels specified inside index file referenced by - ``import_schema_uri``, + [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri], e.g. jsonl file. import_schema_uri (str): Required. Points to a YAML file stored on Google Cloud @@ -172,7 +172,7 @@ class ExportDataConfig(proto.Message): to-be-exported DataItems(specified by [data_items_filter][]) that match this filter will be exported. The filter syntax is the same as in - ``ListAnnotations``. + [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. 
""" gcs_destination = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/dataset_service.py b/google/cloud/aiplatform_v1beta1/types/dataset_service.py index 1ab94b8c89..8a068a2911 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset_service.py @@ -52,7 +52,7 @@ class CreateDatasetRequest(proto.Message): r"""Request message for - ``DatasetService.CreateDataset``. + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. Attributes: parent (str): @@ -70,7 +70,7 @@ class CreateDatasetRequest(proto.Message): class CreateDatasetOperationMetadata(proto.Message): r"""Runtime operation information for - ``DatasetService.CreateDataset``. + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): @@ -84,7 +84,7 @@ class CreateDatasetOperationMetadata(proto.Message): class GetDatasetRequest(proto.Message): r"""Request message for - ``DatasetService.GetDataset``. + [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. Attributes: name (str): @@ -100,7 +100,7 @@ class GetDatasetRequest(proto.Message): class UpdateDatasetRequest(proto.Message): r"""Request message for - ``DatasetService.UpdateDataset``. + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. Attributes: dataset (google.cloud.aiplatform_v1beta1.types.Dataset): @@ -124,7 +124,7 @@ class UpdateDatasetRequest(proto.Message): class ListDatasetsRequest(proto.Message): r"""Request message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. 
Attributes: parent (str): @@ -178,7 +178,7 @@ class ListDatasetsRequest(proto.Message): class ListDatasetsResponse(proto.Message): r"""Response message for - ``DatasetService.ListDatasets``. + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. Attributes: datasets (Sequence[google.cloud.aiplatform_v1beta1.types.Dataset]): @@ -201,7 +201,7 @@ def raw_page(self): class DeleteDatasetRequest(proto.Message): r"""Request message for - ``DatasetService.DeleteDataset``. + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. Attributes: name (str): @@ -215,7 +215,7 @@ class DeleteDatasetRequest(proto.Message): class ImportDataRequest(proto.Message): r"""Request message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. Attributes: name (str): @@ -236,13 +236,13 @@ class ImportDataRequest(proto.Message): class ImportDataResponse(proto.Message): r"""Response message for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. """ class ImportDataOperationMetadata(proto.Message): r"""Runtime operation information for - ``DatasetService.ImportData``. + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): @@ -256,7 +256,7 @@ class ImportDataOperationMetadata(proto.Message): class ExportDataRequest(proto.Message): r"""Request message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. Attributes: name (str): @@ -275,7 +275,7 @@ class ExportDataRequest(proto.Message): class ExportDataResponse(proto.Message): r"""Response message for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. 
Attributes: exported_files (Sequence[str]): @@ -288,7 +288,7 @@ class ExportDataResponse(proto.Message): class ExportDataOperationMetadata(proto.Message): r"""Runtime operation information for - ``DatasetService.ExportData``. + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): @@ -308,7 +308,7 @@ class ExportDataOperationMetadata(proto.Message): class ListDataItemsRequest(proto.Message): r"""Request message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. Attributes: parent (str): @@ -344,7 +344,7 @@ class ListDataItemsRequest(proto.Message): class ListDataItemsResponse(proto.Message): r"""Response message for - ``DatasetService.ListDataItems``. + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. Attributes: data_items (Sequence[google.cloud.aiplatform_v1beta1.types.DataItem]): @@ -367,7 +367,7 @@ def raw_page(self): class GetAnnotationSpecRequest(proto.Message): r"""Request message for - ``DatasetService.GetAnnotationSpec``. + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. Attributes: name (str): @@ -384,7 +384,7 @@ class GetAnnotationSpecRequest(proto.Message): class ListAnnotationsRequest(proto.Message): r"""Request message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. Attributes: parent (str): @@ -420,7 +420,7 @@ class ListAnnotationsRequest(proto.Message): class ListAnnotationsResponse(proto.Message): r"""Response message for - ``DatasetService.ListAnnotations``. + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. 
Attributes: annotations (Sequence[google.cloud.aiplatform_v1beta1.types.Annotation]): diff --git a/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py b/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py new file mode 100644 index 0000000000..e6881865ca --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"DeployedIndexRef",}, +) + + +class DeployedIndexRef(proto.Message): + r"""Points to a DeployedIndex. + + Attributes: + index_endpoint (str): + Immutable. A resource name of the + IndexEndpoint. + deployed_index_id (str): + Immutable. The ID of the DeployedIndex in the + above IndexEndpoint. + """ + + index_endpoint = proto.Field(proto.STRING, number=1) + + deployed_index_id = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint.py b/google/cloud/aiplatform_v1beta1/types/endpoint.py index 40ede068f3..fb8b12af12 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -46,9 +46,9 @@ class Endpoint(proto.Message): deployed_models (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedModel]): Output only. 
The models deployed in this Endpoint. To add or remove DeployedModels use - ``EndpointService.DeployModel`` + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] and - ``EndpointService.UndeployModel`` + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel] respectively. traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint.TrafficSplitEntry]): A map from a DeployedModel's ID to the @@ -142,19 +142,19 @@ class DeployedModel(proto.Message): Explanation configuration for this DeployedModel. When deploying a Model using - ``EndpointService.DeployModel``, + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel], this value overrides the value of - ``Model.explanation_spec``. + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. All fields of - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] are optional in the request. If a field of - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] is not populated, the value of the same field of - ``Model.explanation_spec`` + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] is inherited. If the corresponding - ``Model.explanation_spec`` + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] is not populated, all fields of the - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] will be used for the explanation configuration. 
service_account (str): The service account that the DeployedModel's container runs diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py index fe7442ab2a..a67bbafd7c 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -45,7 +45,7 @@ class CreateEndpointRequest(proto.Message): r"""Request message for - ``EndpointService.CreateEndpoint``. + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. Attributes: parent (str): @@ -63,7 +63,7 @@ class CreateEndpointRequest(proto.Message): class CreateEndpointOperationMetadata(proto.Message): r"""Runtime operation information for - ``EndpointService.CreateEndpoint``. + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): @@ -77,7 +77,7 @@ class CreateEndpointOperationMetadata(proto.Message): class GetEndpointRequest(proto.Message): r"""Request message for - ``EndpointService.GetEndpoint`` + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] Attributes: name (str): @@ -90,7 +90,7 @@ class GetEndpointRequest(proto.Message): class ListEndpointsRequest(proto.Message): r"""Request message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. Attributes: parent (str): @@ -124,9 +124,9 @@ class ListEndpointsRequest(proto.Message): page_token (str): Optional. The standard list page token. 
Typically obtained via - ``ListEndpointsResponse.next_page_token`` + [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListEndpointsResponse.next_page_token] of the previous - ``EndpointService.ListEndpoints`` + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Optional. Mask specifying which fields to @@ -146,14 +146,14 @@ class ListEndpointsRequest(proto.Message): class ListEndpointsResponse(proto.Message): r"""Response message for - ``EndpointService.ListEndpoints``. + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. Attributes: endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint]): List of Endpoints in the requested page. next_page_token (str): A token to retrieve the next page of results. Pass to - ``ListEndpointsRequest.page_token`` + [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListEndpointsRequest.page_token] to obtain that page. """ @@ -170,7 +170,7 @@ def raw_page(self): class UpdateEndpointRequest(proto.Message): r"""Request message for - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. Attributes: endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): @@ -188,7 +188,7 @@ class UpdateEndpointRequest(proto.Message): class DeleteEndpointRequest(proto.Message): r"""Request message for - ``EndpointService.DeleteEndpoint``. + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. Attributes: name (str): @@ -202,7 +202,7 @@ class DeleteEndpointRequest(proto.Message): class DeployModelRequest(proto.Message): r"""Request message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. 
Attributes: endpoint (str): @@ -212,17 +212,17 @@ class DeployModelRequest(proto.Message): deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): Required. The DeployedModel to be created within the Endpoint. Note that - ``Endpoint.traffic_split`` + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via - ``EndpointService.UpdateEndpoint``. + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If this field is non-empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its place by @@ -230,7 +230,7 @@ class DeployModelRequest(proto.Message): 100. If this field is empty, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] is not updated. """ @@ -245,7 +245,7 @@ class DeployModelRequest(proto.Message): class DeployModelResponse(proto.Message): r"""Response message for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. Attributes: deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): @@ -260,7 +260,7 @@ class DeployModelResponse(proto.Message): class DeployModelOperationMetadata(proto.Message): r"""Runtime operation information for - ``EndpointService.DeployModel``. + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. 
Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): @@ -274,7 +274,7 @@ class DeployModelOperationMetadata(proto.Message): class UndeployModelRequest(proto.Message): r"""Request message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. Attributes: endpoint (str): @@ -286,7 +286,7 @@ class UndeployModelRequest(proto.Message): undeployed from the Endpoint. traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]): If this field is provided, then the Endpoint's - ``traffic_split`` + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when this call returns. A @@ -304,13 +304,13 @@ class UndeployModelRequest(proto.Message): class UndeployModelResponse(proto.Message): r"""Response message for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. """ class UndeployModelOperationMetadata(proto.Message): r"""Runtime operation information for - ``EndpointService.UndeployModel``. + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): diff --git a/google/cloud/aiplatform_v1beta1/types/entity_type.py b/google/cloud/aiplatform_v1beta1/types/entity_type.py new file mode 100644 index 0000000000..c1e599c569 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/entity_type.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"EntityType",}, +) + + +class EntityType(proto.Message): + r"""An entity type is a type of object in a system that needs to + be modeled and have stored information about. For example, + driver is an entity type, and driver0 is an instance of an + entity type driver. + + Attributes: + name (str): + Immutable. Name of the EntityType. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + The last part entity_type is assigned by the client. The + entity_type can be up to 64 characters long and can consist + only of ASCII Latin letters A-Z and a-z and underscore(_), + and ASCII digits 0-9 starting with a letter. The value will + be unique given a featurestore. + description (str): + Optional. Description of the EntityType. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was most recently updated. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.EntityType.LabelsEntry]): + Optional. The labels with user-defined + metadata to organize your EntityTypes. 
+ Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one EntityType + (System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Optional. Used to perform a consistent read- + odify-write updates. If not set, a blind + "overwrite" update happens. + monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): + Optional. The default monitoring configuration for all + Features under this EntityType. + + If this is populated with + [FeaturestoreMonitoringConfig.monitoring_interval] + specified, snapshot analysis monitoring is enabled. + Otherwise, snapshot analysis monitoring is disabled. + """ + + name = proto.Field(proto.STRING, number=1) + + description = proto.Field(proto.STRING, number=2) + + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=6) + + etag = proto.Field(proto.STRING, number=7) + + monitoring_config = proto.Field( + proto.MESSAGE, + number=8, + message=featurestore_monitoring.FeaturestoreMonitoringConfig, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/event.py b/google/cloud/aiplatform_v1beta1/types/event.py new file mode 100644 index 0000000000..52bf55e074 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/event.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"Event",}, +) + + +class Event(proto.Message): + r"""An edge describing the relationship between an Artifact and + an Execution in a lineage graph. + + Attributes: + artifact (str): + Required. The relative resource name of the + Artifact in the Event. + execution (str): + Output only. The relative resource name of + the Execution in the Event. + event_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time the Event occurred. + type_ (google.cloud.aiplatform_v1beta1.types.Event.Type): + Required. The type of the Event. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.Event.LabelsEntry]): + The labels with user-defined metadata to + annotate Events. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Event (System labels are + excluded). + + See https://goo.gl/xmQnxf for more information + and examples of labels. System reserved label + keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + """ + + class Type(proto.Enum): + r"""Describes whether an Event's Artifact is the Execution's + input or output. 
+ """ + TYPE_UNSPECIFIED = 0 + INPUT = 1 + OUTPUT = 2 + + artifact = proto.Field(proto.STRING, number=1) + + execution = proto.Field(proto.STRING, number=2) + + event_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + type_ = proto.Field(proto.ENUM, number=4, enum=Type,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/execution.py b/google/cloud/aiplatform_v1beta1/types/execution.py new file mode 100644 index 0000000000..6b401db1f6 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/execution.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"Execution",}, +) + + +class Execution(proto.Message): + r"""Instance of a general execution. + + Attributes: + name (str): + Output only. The resource name of the + Execution. + display_name (str): + User provided display name of the Execution. + May be up to 128 Unicode characters. + state (google.cloud.aiplatform_v1beta1.types.Execution.State): + The state of this Execution. 
This is a + property of the Execution, and does not imply or + capture any ongoing process. This property is + managed by clients (such as AI Platform + Pipelines) and the system does not prescribe or + check the validity of state transitions. + etag (str): + An eTag used to perform consistent read- + odify-write updates. If not set, a blind + "overwrite" update happens. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.Execution.LabelsEntry]): + The labels with user-defined metadata to + organize your Executions. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Execution (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Execution + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Execution + was last updated. + schema_title (str): + The title of the schema describing the + metadata. + Schema title and version is expected to be + registered in earlier Create Schema calls. And + both are used together as unique identifiers to + identify schemas within the local metadata + store. + schema_version (str): + The version of the schema in schema_name to use. + + Schema title and version is expected to be registered in + earlier Create Schema calls. And both are used together as + unique identifiers to identify schemas within the local + metadata store. + metadata (google.protobuf.struct_pb2.Struct): + Properties of the Execution. 
+ description (str): + Description of the Execution + """ + + class State(proto.Enum): + r"""Describes the state of the Execution.""" + STATE_UNSPECIFIED = 0 + NEW = 1 + RUNNING = 2 + COMPLETE = 3 + FAILED = 4 + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + state = proto.Field(proto.ENUM, number=6, enum=State,) + + etag = proto.Field(proto.STRING, number=9) + + labels = proto.MapField(proto.STRING, proto.STRING, number=10) + + create_time = proto.Field(proto.MESSAGE, number=11, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=12, message=timestamp.Timestamp,) + + schema_title = proto.Field(proto.STRING, number=13) + + schema_version = proto.Field(proto.STRING, number=14) + + metadata = proto.Field(proto.MESSAGE, number=15, message=struct.Struct,) + + description = proto.Field(proto.STRING, number=16) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/explanation.py b/google/cloud/aiplatform_v1beta1/types/explanation.py index d9b48b60ab..5d4ebbdceb 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation.py @@ -43,9 +43,9 @@ class Explanation(proto.Message): r"""Explanation of a prediction (provided in - ``PredictResponse.predictions``) + [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]) produced by the Model on a given - ``instance``. + [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. Attributes: attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]): @@ -58,18 +58,18 @@ class Explanation(proto.Message): that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. 
- ``Attribution.output_index`` + [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] can be used to identify which output this attribution is explaining. If users set - ``ExplanationParameters.top_k``, + [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k], the attributions are sorted by - ``instance_output_value`` + [instance_output_value][Attributions.instance_output_value] in descending order. If - ``ExplanationParameters.output_indices`` + [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices] is specified, the attributions are stored by - ``Attribution.output_index`` + [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] in the same order as they appear in the output_indices. """ @@ -92,21 +92,21 @@ class ModelExplanation(proto.Message): that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. - ``Attribution.output_index`` + [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] can be used to identify which output this attribution is explaining. The - ``baselineOutputValue``, - ``instanceOutputValue`` + [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value], + [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value] and - ``featureAttributions`` + [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. - ``Attribution.approximation_error`` + [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error] is not populated. 
""" @@ -123,13 +123,13 @@ class Attribution(proto.Message): Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in - ``ExplanationMetadata.inputs``. + [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. The field name of the output is determined by the key in - ``ExplanationMetadata.outputs``. + [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. If the Model's predicted output has multiple dimensions (rank > 1), this is the value in the output located by - ``output_index``. + [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. If there are multiple baselines, their output values are averaged. @@ -137,11 +137,11 @@ class Attribution(proto.Message): Output only. Model predicted output on the corresponding [explanation instance][ExplainRequest.instances]. The field name of the output is determined by the key in - ``ExplanationMetadata.outputs``. + [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. If the Model predicted output has multiple dimensions, this is the value in the output located by - ``output_index``. + [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. feature_attributions (google.protobuf.struct_pb2.Value): Output only. Attributions of each explained feature. Features are extracted from the [prediction @@ -151,7 +151,7 @@ class Attribution(proto.Message): The value is a struct, whose keys are the name of the feature. The values are how much the feature in the - ``instance`` + [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] contributed to the predicted result. The format of the value is determined by the feature's input @@ -163,21 +163,21 @@ class Attribution(proto.Message): - If the feature is an array of scalar values, the attribution value is an - ``array``. + [array][google.protobuf.Value.list_value]. 
- If the feature is a struct, the attribution value is a - ``struct``. The keys in + [struct][google.protobuf.Value.struct_value]. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The - ``ExplanationMetadata.feature_attributions_schema_uri`` + [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri] field, pointed to by the - ``ExplanationSpec`` + [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] field of the - ``Endpoint.deployed_models`` + [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] object, points to the schema file that describes the features and their attribution values (if it is populated). output_index (Sequence[int]): @@ -192,7 +192,7 @@ class Attribution(proto.Message): of the output vector. Indices start from 0. output_display_name (str): Output only. The display name of the output identified by - ``output_index``. + [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. For example, the predicted class name by a multi-classification Model. @@ -202,24 +202,24 @@ class Attribution(proto.Message): explained output, and can be located using output_index. approximation_error (float): Output only. Error of - ``feature_attributions`` + [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] caused by approximation used in the explanation method. Lower value means more precise attributions. - For Sampled Shapley - ``attribution``, + [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution], increasing - ``path_count`` + [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] might reduce the error. 
- For Integrated Gradients - ``attribution``, + [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution], increasing - ``step_count`` + [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count] might reduce the error. - For [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], increasing - ``step_count`` + [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count] might reduce the error. See `this @@ -228,7 +228,7 @@ class Attribution(proto.Message): output_name (str): Output only. Name of the explain output. Specified as the key in - ``ExplanationMetadata.outputs``. + [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. """ baseline_output_value = proto.Field(proto.DOUBLE, number=1) @@ -305,12 +305,12 @@ class ExplanationParameters(proto.Message): returns explanations for all outputs. output_indices (google.protobuf.struct_pb2.ListValue): If populated, only returns attributions that have - ``output_index`` + [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] contained in output_indices. It must be an ndarray of integers, with the same shape of the output it's explaining. If not populated, returns attributions for - ``top_k`` + [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k] indices of outputs. If neither top_k nor output_indeices is populated, returns the argmax index of the outputs. @@ -444,16 +444,16 @@ class SmoothGradConfig(proto.Message): https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the distribution is different per feature, set - ``feature_noise_sigma`` + [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma] instead for each feature. 
feature_noise_sigma (google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma): This is similar to - ``noise_sigma``, + [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma], but provides additional flexibility. A separate noise sigma can be provided for each feature, which is useful if their distributions are different. No noise is added to features that are not set. If this field is unset, - ``noise_sigma`` + [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] will be used for all features. noisy_sample_count (int): The number of gradient samples to use for approximation. The @@ -497,7 +497,7 @@ class NoiseSigmaForFeature(proto.Message): This represents the standard deviation of the Gaussian kernel that will be used to add noise to the feature prior to computing gradients. Similar to - ``noise_sigma`` + [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] but represents the noise added to the current feature. Defaults to 0.1. """ @@ -513,15 +513,15 @@ class NoiseSigmaForFeature(proto.Message): class ExplanationSpecOverride(proto.Message): r"""The - ``ExplanationSpec`` + [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] entries that can be overridden at [online - explanation]``PredictionService.Explain`` + explanation][PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time. Attributes: parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters): The parameters to be overridden. Note that the - ``method`` + [method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method] cannot be changed. If not specified, no parameter is overridden. 
metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride): @@ -538,7 +538,7 @@ class ExplanationSpecOverride(proto.Message): class ExplanationMetadataOverride(proto.Message): r"""The - ``ExplanationMetadata`` + [ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata] entries that can be overridden at [online explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time. @@ -564,7 +564,7 @@ class InputMetadataOverride(proto.Message): Baseline inputs for this feature. This overrides the ``input_baseline`` field of the - ``ExplanationMetadata.InputMetadata`` + [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] object of the corresponding feature's input metadata. If it's not specified, the original baselines are not overridden. diff --git a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py index 69947e9b9e..4b5eca5241 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py @@ -38,16 +38,16 @@ class ExplanationMetadata(proto.Message): An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in - ``ExplanationMetadata.inputs``. + [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. The baseline of the empty feature is chosen by AI Platform. For AI Platform provided Tensorflow images, the key can be any friendly name of the feature. Once specified, - ``featureAttributions`` + [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] are keyed by this key (if not grouped with another feature). For custom images, the key must match with the key in - ``instance``. + [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. 
outputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.OutputsEntry]): Required. Map from output names to output metadata. @@ -75,7 +75,7 @@ class InputMetadata(proto.Message): r"""Metadata of the input of a feature. Fields other than - ``InputMetadata.input_baselines`` + [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] are applicable only for Models that are using AI Platform-provided images for Tensorflow. @@ -95,12 +95,12 @@ class InputMetadata(proto.Message): For custom images, the element of the baselines must be in the same format as the feature's input in the - ``instance``[]. + [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][]. The schema of any single instance may be specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. input_tensor_name (str): Name of the input tensor for this feature. Required and is only applicable to AI Platform @@ -128,7 +128,7 @@ class InputMetadata(proto.Message): index_feature_mapping (Sequence[str]): A list of feature names for each index in the input tensor. Required when the input - ``InputMetadata.encoding`` + [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding] is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. encoded_tensor_name (str): Encoded tensor is a transformation of the input tensor. Must @@ -242,7 +242,7 @@ class Visualization(proto.Message): clip_percent_lowerbound (float): Excludes attributions below the specified percentile, from the highlighted areas. Defaults - to 35. + to 62. 
overlay_type (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.OverlayType): How the original image is displayed in the visualization. Adjusting the overlay can help @@ -370,9 +370,9 @@ class OutputMetadata(proto.Message): The shape of the value must be an n-dimensional array of strings. The number of dimensions must match that of the outputs to be explained. The - ``Attribution.output_display_name`` + [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] is populated by locating in the mapping with - ``Attribution.output_index``. + [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. display_name_mapping_key (str): Specify a field name in the prediction to look for the display name. @@ -382,7 +382,7 @@ class OutputMetadata(proto.Message): The display names in the prediction must have the same shape of the outputs, so that it can be located by - ``Attribution.output_index`` + [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] for a specific output. output_tensor_name (str): Name of the output tensor. Required and is diff --git a/google/cloud/aiplatform_v1beta1/types/feature.py b/google/cloud/aiplatform_v1beta1/types/feature.py new file mode 100644 index 0000000000..6c71f32536 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/feature.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"Feature",}, +) + + +class Feature(proto.Message): + r"""Feature Metadata information that describes an attribute of + an entity type. For example, apple is an entity type, and color + is a feature that describes apple. + + Attributes: + name (str): + Immutable. Name of the Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + The last part feature is assigned by the client. The feature + can be up to 64 characters long and can consist only of + ASCII Latin letters A-Z and a-z, underscore(_), and ASCII + digits 0-9 starting with a letter. The value will be unique + given an entity type. + description (str): + Description of the Feature. + value_type (google.cloud.aiplatform_v1beta1.types.Feature.ValueType): + Required. Immutable. Type of Feature value. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was most recently updated. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.Feature.LabelsEntry]): + Optional. The labels with user-defined + metadata to organize your Features. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. 
 No more than 64 user + labels can be associated with one Feature + (System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Used to perform a consistent read-modify-write updates. If not set, a blind "overwrite" + update happens. + monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): + Optional. The custom monitoring configuration for this + Feature, if not set, use the monitoring_config defined for + the EntityType this Feature belongs to. + + If this is populated with + [FeaturestoreMonitoringConfig.disabled][] = true, snapshot + analysis monitoring is disabled; if + [FeaturestoreMonitoringConfig.monitoring_interval][] + specified, snapshot analysis monitoring is enabled. + Otherwise, snapshot analysis monitoring config is same as + the EntityType's this Feature belongs to. + monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]): + Output only. A list of historical [Snapshot + Analysis][google.cloud.aiplatform.master.FeaturestoreMonitoringConfig.SnapshotAnalysis] + stats requested by user, sorted by + [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time] + descending. 
+ """ + + class ValueType(proto.Enum): + r"""An enum representing the value type of a feature.""" + VALUE_TYPE_UNSPECIFIED = 0 + BOOL = 1 + BOOL_ARRAY = 2 + DOUBLE = 3 + DOUBLE_ARRAY = 4 + INT64 = 9 + INT64_ARRAY = 10 + STRING = 11 + STRING_ARRAY = 12 + BYTES = 13 + + name = proto.Field(proto.STRING, number=1) + + description = proto.Field(proto.STRING, number=2) + + value_type = proto.Field(proto.ENUM, number=3, enum=ValueType,) + + create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=6) + + etag = proto.Field(proto.STRING, number=7) + + monitoring_config = proto.Field( + proto.MESSAGE, + number=9, + message=featurestore_monitoring.FeaturestoreMonitoringConfig, + ) + + monitoring_stats = proto.RepeatedField( + proto.MESSAGE, number=10, message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py b/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py new file mode 100644 index 0000000000..5fa2c45a8d --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"FeatureStatsAnomaly",}, +) + + +class FeatureStatsAnomaly(proto.Message): + r"""Stats and Anomaly generated at specific timestamp for specific + Feature. The start_time and end_time are used to define the time + range of the dataset that current stats belongs to, e.g. prediction + traffic is bucketed into prediction datasets by time window. If the + Dataset is not defined by time window, start_time = end_time. + Timestamp of the stats and anomalies always refers to end_time. Raw + stats and anomalies are stored in stats_uri or anomaly_uri in the + tensorflow defined protos. Field data_stats contains almost + identical information with the raw stats in AI Platform defined + proto, for UI to display. + + Attributes: + score (float): + Feature importance score, only populated when cross-feature + monitoring is enabled. For now only used to represent + feature attribution score within range [0, 1] for + [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW] + and + [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT]. + stats_uri (str): + Path of the stats file for current feature values in Cloud + Storage bucket. Format: + gs:////stats. Example: + gs://monitoring_bucket/feature_name/stats. Stats are stored + as binary format with Protobuf message + `tensorflow.metadata.v0.FeatureNameStatistics `__. + anomaly_uri (str): + Path of the anomaly file for current feature values in Cloud + Storage bucket. Format: + gs:////anomalies. Example: + gs://monitoring_bucket/feature_name/anomalies. 
 Stats are + stored as binary format with Protobuf message Anomalies are + stored as binary format with Protobuf message + [tensorflow.metadata.v0.AnomalyInfo] + (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). + distribution_deviation (float): + Deviation from the current stats to baseline + stats. 1. For categorical feature, the + distribution distance is calculated by + L-infinity norm. + 2. For numerical feature, the distribution + distance is calculated by Jensen–Shannon + divergence. + anomaly_detection_threshold (float): + This is the threshold used when detecting anomalies. The + threshold can be changed by user, so this one might be + different from + [ThresholdConfig.value][google.cloud.aiplatform.v1beta1.ThresholdConfig.value]. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The start timestamp of window where stats were generated. + For objectives where time window doesn't make sense (e.g. + Featurestore Snapshot Monitoring), start_time is only used + to indicate the monitoring intervals, so it always equals to + (end_time - monitoring_interval). + end_time (google.protobuf.timestamp_pb2.Timestamp): + The end timestamp of window where stats were generated. For + objectives where time window doesn't make sense (e.g. + Featurestore Snapshot Monitoring), end_time indicates the + timestamp of the data used to generate stats (e.g. timestamp + we take snapshots for feature values). 
+ """ + + score = proto.Field(proto.DOUBLE, number=1) + + stats_uri = proto.Field(proto.STRING, number=3) + + anomaly_uri = proto.Field(proto.STRING, number=4) + + distribution_deviation = proto.Field(proto.DOUBLE, number=5) + + anomaly_detection_threshold = proto.Field(proto.DOUBLE, number=9) + + start_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=8, message=timestamp.Timestamp,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/feature_selector.py b/google/cloud/aiplatform_v1beta1/types/feature_selector.py new file mode 100644 index 0000000000..cda0ff6713 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/feature_selector.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={"IdMatcher", "FeatureSelector",}, +) + + +class IdMatcher(proto.Message): + r"""Matcher for Features of an EntityType by Feature ID. + + Attributes: + ids (Sequence[str]): + Required. The following are accepted as ``ids``: + + - A single-element list containing only ``*``, which + selects all Features in the target EntityType, or + - A list containing only Feature IDs, which selects only + Features with those IDs in the target EntityType. 
+ """ + + ids = proto.RepeatedField(proto.STRING, number=1) + + +class FeatureSelector(proto.Message): + r"""Selector for Features of an EntityType. + + Attributes: + id_matcher (google.cloud.aiplatform_v1beta1.types.IdMatcher): + Required. Matches Features based on ID. + """ + + id_matcher = proto.Field(proto.MESSAGE, number=1, message="IdMatcher",) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore.py b/google/cloud/aiplatform_v1beta1/types/featurestore.py new file mode 100644 index 0000000000..670453f362 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/featurestore.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"Featurestore",}, +) + + +class Featurestore(proto.Message): + r"""Featurestore configuration information on how the + Featurestore is configured. + + Attributes: + name (str): + Output only. Name of the Featurestore. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + display_name (str): + Required. The user-defined name of the + Featurestore. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. 
 + Display name of a Featurestore must be unique + within a single Project and Location Pair. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Featurestore + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Featurestore + was last updated. + etag (str): + Optional. Used to perform consistent read-modify-write updates. If not set, a blind + "overwrite" update happens. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.Featurestore.LabelsEntry]): + Optional. The labels with user-defined + metadata to organize your Featurestore. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one + Featurestore(System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + online_serving_config (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig): + Required. Config for online serving + resources. + state (google.cloud.aiplatform_v1beta1.types.Featurestore.State): + Output only. State of the featurestore. + """ + + class State(proto.Enum): + r"""Possible states a Featurestore can have.""" + STATE_UNSPECIFIED = 0 + STABLE = 1 + UPDATING = 2 + + class OnlineServingConfig(proto.Message): + r"""OnlineServingConfig specifies the details for provisioning + online serving resources. + + Attributes: + fixed_node_count (int): + Required. The number of nodes for each + cluster. The number of nodes will not scale + automatically but can be scaled manually by + providing different values when updating. 
+ """ + + fixed_node_count = proto.Field(proto.INT32, number=2) + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + etag = proto.Field(proto.STRING, number=5) + + labels = proto.MapField(proto.STRING, proto.STRING, number=6) + + online_serving_config = proto.Field( + proto.MESSAGE, number=7, message=OnlineServingConfig, + ) + + state = proto.Field(proto.ENUM, number=8, enum=State,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py b/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py new file mode 100644 index 0000000000..815faaa6fb --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={"FeaturestoreMonitoringConfig",}, +) + + +class FeaturestoreMonitoringConfig(proto.Message): + r"""Configuration of how features in Featurestore are monitored. 
+ + Attributes: + snapshot_analysis (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.SnapshotAnalysis): + The config for Snapshot Analysis Based + Feature Monitoring. + """ + + class SnapshotAnalysis(proto.Message): + r"""Configuration of the Featurestore's Snapshot Analysis Based + Monitoring. This type of analysis generates statistics for each + Feature based on a snapshot of the latest feature value of each + entities every monitoring_interval. + + Attributes: + disabled (bool): + The monitoring schedule for snapshot analysis. For + EntityType-level config: unset / disabled = true indicates + disabled by default for Features under it; otherwise by + default enable snapshot analysis monitoring with + monitoring_interval for Features under it. Feature-level + config: disabled = true indicates disabled regardless of the + EntityType-level config; unset monitoring_interval indicates + going with EntityType-level config; otherwise run snapshot + analysis monitoring with monitoring_interval regardless of + the EntityType-level config. Explicitly Disable the snapshot + analysis based monitoring. + monitoring_interval (google.protobuf.duration_pb2.Duration): + Configuration of the snapshot analysis based + monitoring pipeline running interval. The value + is rolled up to full day. 
+ """ + + disabled = proto.Field(proto.BOOL, number=1) + + monitoring_interval = proto.Field( + proto.MESSAGE, number=2, message=duration.Duration, + ) + + snapshot_analysis = proto.Field(proto.MESSAGE, number=1, message=SnapshotAnalysis,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py new file mode 100644 index 0000000000..064b1ba2cd --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py @@ -0,0 +1,282 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import ( + feature_selector as gca_feature_selector, +) +from google.cloud.aiplatform_v1beta1.types import types +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ReadFeatureValuesRequest", + "ReadFeatureValuesResponse", + "StreamingReadFeatureValuesRequest", + "FeatureValue", + "FeatureValueList", + }, +) + + +class ReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + Attributes: + entity_type (str): + Required. 
The resource name of the EntityType for the entity + being read. Value format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting user + clicks on a website, an EntityType ID could be "user". + entity_id (str): + Required. ID for a specific entity. For example, for a + machine learning model predicting user clicks on a website, + an entity ID could be "user_123". + feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): + Required. Selector choosing Features of the + target EntityType. + """ + + entity_type = proto.Field(proto.STRING, number=1) + + entity_id = proto.Field(proto.STRING, number=2) + + feature_selector = proto.Field( + proto.MESSAGE, number=3, message=gca_feature_selector.FeatureSelector, + ) + + +class ReadFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + Attributes: + header (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.Header): + Response header. + entity_view (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.EntityView): + Entity view with Feature values. This may be + the entity in the Featurestore if values for all + Features were requested, or a projection of the + entity in the Featurestore if values for only + some Features were requested. + """ + + class FeatureDescriptor(proto.Message): + r"""Metadata for requested Features. + + Attributes: + id (str): + Feature ID. + """ + + id = proto.Field(proto.STRING, number=1) + + class Header(proto.Message): + r"""Response header with metadata for the requested + [ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest.entity_type] + and Features. 
+ + Attributes: + entity_type (str): + The resource name of the EntityType from the + [ReadFeatureValuesRequest][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest]. + Value format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + feature_descriptors (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.FeatureDescriptor]): + List of Feature metadata corresponding to each piece of + [ReadFeatureValuesResponse.data][]. + """ + + entity_type = proto.Field(proto.STRING, number=1) + + feature_descriptors = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="ReadFeatureValuesResponse.FeatureDescriptor", + ) + + class EntityView(proto.Message): + r"""Entity view with Feature values. + + Attributes: + entity_id (str): + ID of the requested entity. + data (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.EntityView.Data]): + Each piece of data holds the k requested values for one + requested Feature. If no values for the requested Feature + exist, the corresponding cell will be empty. This has the + same size and is in the same order as the features from the + header + [ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1beta1.ReadFeatureValuesResponse.header]. + """ + + class Data(proto.Message): + r"""Container to hold value(s), successive in time, for one + Feature from the request. + + Attributes: + value (google.cloud.aiplatform_v1beta1.types.FeatureValue): + Feature value if a single value is requested. + values (google.cloud.aiplatform_v1beta1.types.FeatureValueList): + Feature values list if values, successive in + time, are requested. If the requested number of + values is greater than the number of existing + Feature values, nonexistent values are omitted + instead of being returned as empty. 
+ """ + + value = proto.Field( + proto.MESSAGE, number=1, oneof="data", message="FeatureValue", + ) + + values = proto.Field( + proto.MESSAGE, number=2, oneof="data", message="FeatureValueList", + ) + + entity_id = proto.Field(proto.STRING, number=1) + + data = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="ReadFeatureValuesResponse.EntityView.Data", + ) + + header = proto.Field(proto.MESSAGE, number=1, message=Header,) + + entity_view = proto.Field(proto.MESSAGE, number=2, message=EntityView,) + + +class StreamingReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. + + Attributes: + entity_type (str): + Required. The resource name of the entities' type. Value + format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting user + clicks on a website, an EntityType ID could be "user". + entity_ids (Sequence[str]): + Required. IDs of entities to read Feature values of. For + example, for a machine learning model predicting user clicks + on a website, an entity ID could be "user_123". + feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): + Required. Selector choosing Features of the + target EntityType. + """ + + entity_type = proto.Field(proto.STRING, number=1) + + entity_ids = proto.RepeatedField(proto.STRING, number=2) + + feature_selector = proto.Field( + proto.MESSAGE, number=3, message=gca_feature_selector.FeatureSelector, + ) + + +class FeatureValue(proto.Message): + r"""Value for a feature. + NEXT ID: 15 + + Attributes: + bool_value (bool): + Bool type feature value. + double_value (float): + Double type feature value. + int64_value (int): + Int64 feature value. + string_value (str): + String feature value. + bool_array_value (google.cloud.aiplatform_v1beta1.types.BoolArray): + A list of bool type feature value. 
+ double_array_value (google.cloud.aiplatform_v1beta1.types.DoubleArray): + A list of double type feature value. + int64_array_value (google.cloud.aiplatform_v1beta1.types.Int64Array): + A list of int64 type feature value. + string_array_value (google.cloud.aiplatform_v1beta1.types.StringArray): + A list of string type feature value. + bytes_value (bytes): + Bytes feature value. + metadata (google.cloud.aiplatform_v1beta1.types.FeatureValue.Metadata): + Output only. Metadata of feature value. + """ + + class Metadata(proto.Message): + r"""Metadata of feature value. + + Attributes: + generate_time (google.protobuf.timestamp_pb2.Timestamp): + Feature generation timestamp. Typically, it + is provided by user at feature ingestion time. + If not, feature store will use the system + timestamp when the data is ingested into feature + store. + """ + + generate_time = proto.Field( + proto.MESSAGE, number=1, message=timestamp.Timestamp, + ) + + bool_value = proto.Field(proto.BOOL, number=1, oneof="value") + + double_value = proto.Field(proto.DOUBLE, number=2, oneof="value") + + int64_value = proto.Field(proto.INT64, number=5, oneof="value") + + string_value = proto.Field(proto.STRING, number=6, oneof="value") + + bool_array_value = proto.Field( + proto.MESSAGE, number=7, oneof="value", message=types.BoolArray, + ) + + double_array_value = proto.Field( + proto.MESSAGE, number=8, oneof="value", message=types.DoubleArray, + ) + + int64_array_value = proto.Field( + proto.MESSAGE, number=11, oneof="value", message=types.Int64Array, + ) + + string_array_value = proto.Field( + proto.MESSAGE, number=12, oneof="value", message=types.StringArray, + ) + + bytes_value = proto.Field(proto.BYTES, number=13, oneof="value") + + metadata = proto.Field(proto.MESSAGE, number=14, message=Metadata,) + + +class FeatureValueList(proto.Message): + r"""Container for list of values. + + Attributes: + values (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureValue]): + A list of feature values. 
All of them should + be the same data type. + """ + + values = proto.RepeatedField(proto.MESSAGE, number=1, message="FeatureValue",) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py new file mode 100644 index 0000000000..46b91f45d4 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -0,0 +1,1281 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import ( + feature_selector as gca_feature_selector, +) +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "CreateFeaturestoreRequest", + "GetFeaturestoreRequest", + "ListFeaturestoresRequest", + "ListFeaturestoresResponse", + "UpdateFeaturestoreRequest", + "DeleteFeaturestoreRequest", + "ImportFeatureValuesRequest", + "ImportFeatureValuesResponse", + "BatchReadFeatureValuesRequest", + "ExportFeatureValuesRequest", + "DestinationFeatureSetting", + "FeatureValueDestination", + "ExportFeatureValuesResponse", + "BatchReadFeatureValuesResponse", + "CreateEntityTypeRequest", + "GetEntityTypeRequest", + "ListEntityTypesRequest", + "ListEntityTypesResponse", + "UpdateEntityTypeRequest", + "DeleteEntityTypeRequest", + "CreateFeatureRequest", + "BatchCreateFeaturesRequest", + "BatchCreateFeaturesResponse", + "GetFeatureRequest", + "ListFeaturesRequest", + "ListFeaturesResponse", + "SearchFeaturesRequest", + "SearchFeaturesResponse", + "UpdateFeatureRequest", + "DeleteFeatureRequest", + "CreateFeaturestoreOperationMetadata", + "UpdateFeaturestoreOperationMetadata", + "ImportFeatureValuesOperationMetadata", + "ExportFeatureValuesOperationMetadata", + "BatchReadFeatureValuesOperationMetadata", + "CreateEntityTypeOperationMetadata", + "CreateFeatureOperationMetadata", + "BatchCreateFeaturesOperationMetadata", + }, +) + + +class 
CreateFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. + + Attributes: + parent (str): + Required. The resource name of the Location to create + Featurestores. Format: + ``projects/{project}/locations/{location}`` + featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): + Required. The Featurestore to create. + featurestore_id (str): + Required. The ID to use for this Featurestore, which will + become the final component of the Featurestore's resource + name. + + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within the project and location. + """ + + parent = proto.Field(proto.STRING, number=1) + + featurestore = proto.Field( + proto.MESSAGE, number=2, message=gca_featurestore.Featurestore, + ) + + featurestore_id = proto.Field(proto.STRING, number=3) + + +class GetFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. + + Attributes: + name (str): + Required. The name of the Featurestore + resource. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListFeaturestoresRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + + Attributes: + parent (str): + Required. The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Lists the featurestores that match the filter expression. + The following fields are supported: + + - ``display_name``: Supports =, != comparisons. + - ``create_time``: Supports =, !=, <, >, <=, and >= + comparisons. Values must be in RFC 3339 format.
+ - ``update_time``: Supports =, !=, <, >, <=, and >= + comparisons. Values must be in RFC 3339 format. + - ``online_serving_config.fixed_node_count``: Supports =, + !=, <, >, <=, and >= comparisons. + - ``labels``: Supports key-value equality and key presence. + + Examples: + + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + Featurestores created or updated after 2020-01-01. + - ``labels.env = "prod"`` Featurestores with label "env" + set to "prod". + page_size (int): + The maximum number of Featurestores to + return. The service may return fewer than this + value. If unspecified, at most 100 Featurestores + will be returned. The maximum value is 100; any + value greater than 100 will be coerced to 100. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported Fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + - ``online_serving_config.fixed_node_count`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + order_by = proto.Field(proto.STRING, number=5) + + read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask.FieldMask,) + + +class ListFeaturestoresResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + + Attributes: + featurestores (Sequence[google.cloud.aiplatform_v1beta1.types.Featurestore]): + The Featurestores matching the request. + next_page_token (str): + A token, which can be sent as + [ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturestoresRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + featurestores = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_featurestore.Featurestore, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. + + Attributes: + featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the Featurestore resource by the update. The fields + specified in the update_mask are relative to the resource, + not the full request. A field will be overwritten if it is + in the mask. 
If the user does not provide a mask then only + the non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override all + fields. + + Updatable fields: + + - ``display_name`` + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.max_online_serving_size`` + """ + + featurestore = proto.Field( + proto.MESSAGE, number=1, message=gca_featurestore.Featurestore, + ) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class DeleteFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. + + Attributes: + name (str): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + force (bool): + If set to true, any EntityTypes and Features + for this Featurestore will also be deleted. + (Otherwise, the request will only work if the + Featurestore has no EntityTypes.) + """ + + name = proto.Field(proto.STRING, number=1) + + force = proto.Field(proto.BOOL, number=2) + + +class ImportFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + Attributes: + avro_source (google.cloud.aiplatform_v1beta1.types.AvroSource): + + bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): + + csv_source (google.cloud.aiplatform_v1beta1.types.CsvSource): + + feature_time_field (str): + Source column that holds the Feature + timestamp for all Feature values in each entity. + feature_time (google.protobuf.timestamp_pb2.Timestamp): + Single Feature timestamp for all entities + being imported. The timestamp must not have + higher than millisecond precision. + entity_type (str): + Required. 
The resource name of the EntityType grouping the + Features for which values are being imported. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + entity_id_field (str): + Source column that holds entity IDs. If not provided, entity + IDs are extracted from the column named ``entity_id``. + feature_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest.FeatureSpec]): + Required. Specifications defining which Feature values to + import from the entity. The request fails if no + feature_specs are provided, and having multiple + feature_specs for one Feature is not allowed. + disable_online_serving (bool): + If set, data will not be imported for online + serving. This is typically used for backfilling, + where Feature generation timestamps are not in + the timestamp range needed for online serving. + worker_count (int): + Specifies the number of workers that are used + to write data to the Featurestore. Consider the + online serving capacity that you require to + achieve the desired import throughput without + interfering with online serving. The value must + be positive, and less than or equal to 100. If + not set, defaults to using 1 worker. The low + count ensures minimal impact on online serving + performance. + """ + + class FeatureSpec(proto.Message): + r"""Defines the Feature value(s) to import. + + Attributes: + id (str): + Required. ID of the Feature to import values + of. This Feature must exist in the target + EntityType, or the request will fail. + source_field (str): + Source column to get the Feature values from. + If not set, uses the column with the same name + as the Feature ID. 
+ """ + + id = proto.Field(proto.STRING, number=1) + + source_field = proto.Field(proto.STRING, number=2) + + avro_source = proto.Field( + proto.MESSAGE, number=2, oneof="source", message=io.AvroSource, + ) + + bigquery_source = proto.Field( + proto.MESSAGE, number=3, oneof="source", message=io.BigQuerySource, + ) + + csv_source = proto.Field( + proto.MESSAGE, number=4, oneof="source", message=io.CsvSource, + ) + + feature_time_field = proto.Field( + proto.STRING, number=6, oneof="feature_time_source" + ) + + feature_time = proto.Field( + proto.MESSAGE, + number=7, + oneof="feature_time_source", + message=timestamp.Timestamp, + ) + + entity_type = proto.Field(proto.STRING, number=1) + + entity_id_field = proto.Field(proto.STRING, number=5) + + feature_specs = proto.RepeatedField(proto.MESSAGE, number=8, message=FeatureSpec,) + + disable_online_serving = proto.Field(proto.BOOL, number=9) + + worker_count = proto.Field(proto.INT32, number=11) + + +class ImportFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + Attributes: + imported_entity_count (int): + Number of entities that have been imported by + the operation. + imported_feature_value_count (int): + Number of Feature values that have been + imported by the operation. + """ + + imported_entity_count = proto.Field(proto.INT64, number=1) + + imported_feature_value_count = proto.Field(proto.INT64, number=2) + + +class BatchReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. 
+ (- Next Id: 6 -) + + Attributes: + csv_read_instances (google.cloud.aiplatform_v1beta1.types.CsvSource): + Each read instance consists of exactly one read timestamp + and one or more entity IDs identifying entities of the + corresponding EntityTypes whose Features are requested. + + Each output instance contains Feature values of requested + entities concatenated together as of the read time. + + An example read instance may be + ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z``. + + An example output instance may be + ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value``. + + Timestamp in each read instance must be millisecond-aligned. + + ``csv_read_instances`` are read instances stored in a + plain-text CSV file. The header should be: + [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp + + The columns can be in any order. + + Values in the timestamp column must use the RFC 3339 format, + e.g. ``2012-07-30T10:43:17.123Z``. + featurestore (str): + Required. The resource name of the Featurestore from which + to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + destination (google.cloud.aiplatform_v1beta1.types.FeatureValueDestination): + Required. Specifies output location and + format. + entity_type_specs (Sequence[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest.EntityTypeSpec]): + Required. Specifies EntityType grouping Features to read + values of and settings. Each EntityType referenced in + [BatchReadFeatureValuesRequest.entity_type_specs] must have + a column specifying entity IDs in that EntityType in + [BatchReadFeatureValuesRequest.request][] . + """ + + class EntityTypeSpec(proto.Message): + r"""Selects Features of an EntityType to read values of and + specifies read settings. + + Attributes: + entity_type_id (str): + Required. ID of the EntityType to select Features.
The + EntityType id is the + [entity_type_id][google.cloud.aiplatform.v1beta1.CreateEntityTypeRequest.entity_type_id] + specified during EntityType creation. + feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): + Required. Selectors choosing which Feature + values to read from the EntityType. + settings (Sequence[google.cloud.aiplatform_v1beta1.types.DestinationFeatureSetting]): + Per-Feature settings for the batch read. + """ + + entity_type_id = proto.Field(proto.STRING, number=1) + + feature_selector = proto.Field( + proto.MESSAGE, number=2, message=gca_feature_selector.FeatureSelector, + ) + + settings = proto.RepeatedField( + proto.MESSAGE, number=3, message="DestinationFeatureSetting", + ) + + csv_read_instances = proto.Field( + proto.MESSAGE, number=3, oneof="read_option", message=io.CsvSource, + ) + + featurestore = proto.Field(proto.STRING, number=1) + + destination = proto.Field( + proto.MESSAGE, number=4, message="FeatureValueDestination", + ) + + entity_type_specs = proto.RepeatedField( + proto.MESSAGE, number=7, message=EntityTypeSpec, + ) + + +class ExportFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + + Attributes: + snapshot_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.SnapshotExport): + Exports Feature values of all entities of the + EntityType as of a snapshot time. + entity_type (str): + Required. The resource name of the EntityType from which to + export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + destination (google.cloud.aiplatform_v1beta1.types.FeatureValueDestination): + Required. Specifies destination location and + format. + feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): + Required. Selects Features to export values + of. 
+ settings (Sequence[google.cloud.aiplatform_v1beta1.types.DestinationFeatureSetting]): + Per-Feature export settings. + """ + + class SnapshotExport(proto.Message): + r"""Describes exporting Feature values as of the snapshot + timestamp. + + Attributes: + snapshot_time (google.protobuf.timestamp_pb2.Timestamp): + Exports Feature values as of this timestamp. + If not set, retrieve values as of now. + Timestamp, if present, must not have higher than + millisecond precision. + """ + + snapshot_time = proto.Field( + proto.MESSAGE, number=1, message=timestamp.Timestamp, + ) + + snapshot_export = proto.Field( + proto.MESSAGE, number=3, oneof="mode", message=SnapshotExport, + ) + + entity_type = proto.Field(proto.STRING, number=1) + + destination = proto.Field( + proto.MESSAGE, number=4, message="FeatureValueDestination", + ) + + feature_selector = proto.Field( + proto.MESSAGE, number=5, message=gca_feature_selector.FeatureSelector, + ) + + settings = proto.RepeatedField( + proto.MESSAGE, number=6, message="DestinationFeatureSetting", + ) + + +class DestinationFeatureSetting(proto.Message): + r""" + + Attributes: + feature_id (str): + Required. The ID of the Feature to apply the + setting to. + destination_field (str): + Specify the field name in the export + destination. If not specified, Feature ID is + used. + """ + + feature_id = proto.Field(proto.STRING, number=1) + + destination_field = proto.Field(proto.STRING, number=2) + + +class FeatureValueDestination(proto.Message): + r"""A destination location for Feature values and format. + + Attributes: + bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): + Output in BigQuery format. + [BigQueryDestination.output_uri][google.cloud.aiplatform.v1beta1.BigQueryDestination.output_uri] + in + [FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1beta1.FeatureValueDestination.bigquery_destination] + must refer to a table. 
+ tfrecord_destination (google.cloud.aiplatform_v1beta1.types.TFRecordDestination): + Output in TFRecord format. + + Below are the mapping from Feature value type in + Featurestore to Feature value type in TFRecord: + + :: + + Value type in Featurestore | Value type in TFRecord + DOUBLE, DOUBLE_ARRAY | FLOAT_LIST + INT64, INT64_ARRAY | INT64_LIST + STRING, STRING_ARRAY, BYTES | BYTES_LIST + true -> byte_string("true"), false -> byte_string("false") + BOOL, BOOL_ARRAY (true, false) | BYTES_LIST + csv_destination (google.cloud.aiplatform_v1beta1.types.CsvDestination): + Output in CSV format. Array Feature value + types are not allowed in CSV format. + """ + + bigquery_destination = proto.Field( + proto.MESSAGE, number=1, oneof="destination", message=io.BigQueryDestination, + ) + + tfrecord_destination = proto.Field( + proto.MESSAGE, number=2, oneof="destination", message=io.TFRecordDestination, + ) + + csv_destination = proto.Field( + proto.MESSAGE, number=3, oneof="destination", message=io.CsvDestination, + ) + + +class ExportFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + """ + + +class BatchReadFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + """ + + +class CreateEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. + + Attributes: + parent (str): + Required. The resource name of the Featurestore to create + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): + The EntityType to create. + entity_type_id (str): + Required. 
The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within a featurestore. + """ + + parent = proto.Field(proto.STRING, number=1) + + entity_type = proto.Field( + proto.MESSAGE, number=2, message=gca_entity_type.EntityType, + ) + + entity_type_id = proto.Field(proto.STRING, number=3) + + +class GetEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. + + Attributes: + name (str): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListEntityTypesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Attributes: + parent (str): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + filter (str): + Lists the EntityTypes that match the filter expression. The + following filters are supported: + + - ``create_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``update_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``labels``: Supports key-value equality as well as key + presence. + + Examples: + + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + EntityTypes having both (active: yes) and (env: prod) + labels. 
+ - ``labels.env: *`` --> Any EntityType which has a label + with 'env' as the key. + page_size (int): + The maximum number of EntityTypes to return. + The service may return fewer than this value. If + unspecified, at most 1000 EntityTypes will be + returned. The maximum value is 1000; any value + greater than 1000 will be coerced to 1000. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. + + Supported fields: + + - ``entity_type_id`` + - ``create_time`` + - ``update_time`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + order_by = proto.Field(proto.STRING, number=5) + + read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask.FieldMask,) + + +class ListEntityTypesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Attributes: + entity_types (Sequence[google.cloud.aiplatform_v1beta1.types.EntityType]): + The EntityTypes matching the request. + next_page_token (str): + A token, which can be sent as + [ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1beta1.ListEntityTypesRequest.page_token] + to retrieve the next page. 
If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + entity_types = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_entity_type.EntityType, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. + + Attributes: + entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the EntityType resource by the update. The fields + specified in the update_mask are relative to the resource, + not the full request. A field will be overwritten if it is + in the mask. If the user does not provide a mask then only + the non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override all + fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + """ + + entity_type = proto.Field( + proto.MESSAGE, number=1, message=gca_entity_type.EntityType, + ) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class DeleteEntityTypeRequest(proto.Message): + r"""Request message for [FeaturestoreService.DeleteEntityTypes][]. + + Attributes: + name (str): + Required. The name of the EntityType to be deleted. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + force (bool): + If set to true, any Features for this + EntityType will also be deleted. (Otherwise, the + request will only work if the EntityType has no + Features.) + """ + + name = proto.Field(proto.STRING, number=1) + + force = proto.Field(proto.BOOL, number=2) + + +class CreateFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. + + Attributes: + parent (str): + Required. The resource name of the EntityType to create a + Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + feature (google.cloud.aiplatform_v1beta1.types.Feature): + Required. The Feature to create. + feature_id (str): + Required. The ID to use for the Feature, which will become + the final component of the Feature's resource name. + + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within an EntityType. + """ + + parent = proto.Field(proto.STRING, number=1) + + feature = proto.Field(proto.MESSAGE, number=2, message=gca_feature.Feature,) + + feature_id = proto.Field(proto.STRING, number=3) + + +class BatchCreateFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + + Attributes: + parent (str): + Required. The resource name of the EntityType to create the + batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]): + Required. The request message specifying the Features to + create. 
All Features must be created under the same parent + EntityType. The ``parent`` field in each child request + message can be omitted. If ``parent`` is set in a child + request, then the value must match the ``parent`` value in + this request message. + """ + + parent = proto.Field(proto.STRING, number=1) + + requests = proto.RepeatedField( + proto.MESSAGE, number=2, message="CreateFeatureRequest", + ) + + +class BatchCreateFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): + The Features created. + """ + + features = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_feature.Feature, + ) + + +class GetFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. + + Attributes: + name (str): + Required. The name of the Feature resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + + Attributes: + parent (str): + Required. The resource name of the Location to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + filter (str): + Lists the Features that match the filter expression. The + following filters are supported: + + - ``value_type``: Supports = and != comparisons. + - ``create_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``update_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. 
+ - ``labels``: Supports key-value equality as well as key + presence. + + Examples: + + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label with + 'env' as the key. + page_size (int): + The maximum number of Features to return. The + service may return fewer than this value. If + unspecified, at most 1000 Features will be + returned. The maximum value is 1000; any value + greater than 1000 will be coerced to 1000. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``feature_id`` + - ``value_type`` + - ``create_time`` + - ``update_time`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + latest_stats_count (int): + If set, return the most recent + [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count] + of stats for each Feature in response. Valid value is [0, + 10]. If number of stats exists < + [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count], + return all existing stats. 
+ """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + order_by = proto.Field(proto.STRING, number=5) + + read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask.FieldMask,) + + latest_stats_count = proto.Field(proto.INT32, number=7) + + +class ListFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): + The Features matching the request. + next_page_token (str): + A token, which can be sent as + [ListFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + features = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_feature.Feature, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class SearchFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + + Attributes: + location (str): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + query (str): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. Field-restricted + queries and filters can be combined using ``AND`` to form a + conjunction. + + A field query is in the form FIELD:QUERY. This implicitly + checks if QUERY exists as a substring within Feature's + FIELD. The QUERY and the FIELD are converted to a sequence + of words (i.e. tokens) for comparison. 
This is done by: + + - Removing leading/trailing whitespace and tokenizing the + search value. Characters that are not one of alphanumeric + [a-zA-Z0-9], underscore [_], or asterisk [*] are treated + as delimiters for tokens. (*) is treated as a wildcard + that matches characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double quotation + marks ("). With phrases, the order of the words is + important. Words in the phrase must be matching in order and + consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature with ID + containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches a + Feature with ID containing the substring ``foo`` and + description containing the substring ``bar``. + + Besides field queries, the following exact-match filters are + supported. The exact-match filters do not support wildcards. + Unlike field-restricted queries, exact-match filters are + case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as key + presence. + - ``featurestore_id``: Supports = comparisons. + + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. 
+ - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label with + ``env`` as the key. + page_size (int): + The maximum number of Features to return. The + service may return fewer than this value. If + unspecified, at most 100 Features will be + returned. The maximum value is 100; any value + greater than 100 will be coerced to 100. + page_token (str): + A page token, received from a previous + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures], + except ``page_size``, must match the call that provided the + page token. + """ + + location = proto.Field(proto.STRING, number=1) + + query = proto.Field(proto.STRING, number=3) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=5) + + +class SearchFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): + The Features matching the request. + + Fields returned: + + - ``name`` + - ``description`` + - ``labels`` + - ``create_time`` + - ``update_time`` + next_page_token (str): + A token, which can be sent as + [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.SearchFeaturesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + features = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_feature.Feature, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. + + Attributes: + feature (google.cloud.aiplatform_v1beta1.types.Feature): + Required. The Feature's ``name`` field is used to identify + the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the Features resource by the update. The fields specified + in the update_mask are relative to the resource, not the + full request. A field will be overwritten if it is in the + mask. If the user does not provide a mask then only the + non-empty fields present in the request will be overwritten. + Set the update_mask to ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + """ + + feature = proto.Field(proto.MESSAGE, number=1, message=gca_feature.Feature,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class DeleteFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. + + Attributes: + name (str): + Required. The name of the Features to be deleted. 
Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+
+class CreateFeaturestoreOperationMetadata(proto.Message):
+    r"""Details of operations that perform create Featurestore.
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Featurestore.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+    )
+
+
+class UpdateFeaturestoreOperationMetadata(proto.Message):
+    r"""Details of operations that perform update Featurestore.
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Featurestore.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+    )
+
+
+class ImportFeatureValuesOperationMetadata(proto.Message):
+    r"""Details of operations that perform import feature values.
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Featurestore import
+            feature values.
+        imported_entity_count (int):
+            Number of entities that have been imported by
+            the operation.
+        imported_feature_value_count (int):
+            Number of feature values that have been
+            imported by the operation.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+    )
+
+    imported_entity_count = proto.Field(proto.INT64, number=2)
+
+    imported_feature_value_count = proto.Field(proto.INT64, number=3)
+
+
+class ExportFeatureValuesOperationMetadata(proto.Message):
+    r"""Details of operations that export Feature values.
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Featurestore export
+            Feature values.
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class BatchReadFeatureValuesOperationMetadata(proto.Message): + r"""Details of operations that batch reads Feature values. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Featurestore batch + read Features values. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class CreateEntityTypeOperationMetadata(proto.Message): + r"""Details of operations that perform create EntityType. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for EntityType. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class CreateFeatureOperationMetadata(proto.Message): + r"""Details of operations that perform create Feature. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Feature. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class BatchCreateFeaturesOperationMetadata(proto.Message): + r"""Details of operations that perform batch create Features. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Feature. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/index.py b/google/cloud/aiplatform_v1beta1/types/index.py new file mode 100644 index 0000000000..fcb8371935 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/index.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import deployed_index_ref +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"Index",}, +) + + +class Index(proto.Message): + r"""A representation of a collection of database items organized + in a way that allows for approximate nearest neighbor (a.k.a + ANN) algorithms search. + + Attributes: + name (str): + Output only. The resource name of the Index. + display_name (str): + Required. The display name of the Index. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + description (str): + The description of the Index. + metadata_schema_uri (str): + Immutable. 
Points to a YAML file stored on Google Cloud + Storage describing additional information about the Index, + that is specific to it. Unset if the Index does not have any + additional information. The schema is defined as an OpenAPI + 3.0.2 `Schema + Object `__. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. + metadata (google.protobuf.struct_pb2.Value): + An additional information about the Index; the schema of the + metadata can be found in + [metadata_schema][google.cloud.aiplatform.v1beta1.Index.metadata_schema_uri]. + deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndexRef]): + Output only. The pointers to DeployedIndexes + created from this Index. An Index can be only + deleted if all its DeployedIndexes had been + undeployed first. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.Index.LabelsEntry]): + The labels with user-defined metadata to + organize your Indexes. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Index was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Index was most recently + updated. This also includes any update to the contents of + the Index. 
Note that Operations working on this Index may + have their + [Operations.metadata.generic_metadata.update_time] + [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time] + a little after the value of this timestamp, yet that does + not mean their results are not already reflected in the + Index. Result of any successfully completed Operation on the + Index is reflected in it. + """ + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + metadata_schema_uri = proto.Field(proto.STRING, number=4) + + metadata = proto.Field(proto.MESSAGE, number=6, message=struct.Value,) + + deployed_indexes = proto.RepeatedField( + proto.MESSAGE, number=7, message=deployed_index_ref.DeployedIndexRef, + ) + + etag = proto.Field(proto.STRING, number=8) + + labels = proto.MapField(proto.STRING, proto.STRING, number=9) + + create_time = proto.Field(proto.MESSAGE, number=10, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=11, message=timestamp.Timestamp,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py new file mode 100644 index 0000000000..445d7a71bd --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py @@ -0,0 +1,260 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "IndexEndpoint", + "DeployedIndex", + "DeployedIndexAuthConfig", + "IndexPrivateEndpoints", + }, +) + + +class IndexEndpoint(proto.Message): + r"""Indexes are deployed into it. An IndexEndpoint can have + multiple DeployedIndexes. + + Attributes: + name (str): + Output only. The resource name of the + IndexEndpoint. + display_name (str): + Required. The display name of the + IndexEndpoint. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. + description (str): + The description of the IndexEndpoint. + deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndex]): + Output only. The indexes deployed in this + endpoint. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint.LabelsEntry]): + The labels with user-defined metadata to + organize your IndexEndpoints. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + IndexEndpoint was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + IndexEndpoint was last updated. This timestamp + is not updated when the endpoint's + DeployedIndexes are updated, e.g. 
due to updates + of the original Indexes they are the deployments + of. + network (str): + Required. Immutable. The full name of the Google Compute + Engine + `network `__ + to which the IndexEndpoint should be peered. + + Private services access must already be configured for the + network. If left unspecified, the Endpoint is not peered + with any network. + + `Format `__: + projects/{project}/global/networks/{network}. Where + {project} is a project number, as in '12345', and {network} + is network name. + """ + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + deployed_indexes = proto.RepeatedField( + proto.MESSAGE, number=4, message="DeployedIndex", + ) + + etag = proto.Field(proto.STRING, number=5) + + labels = proto.MapField(proto.STRING, proto.STRING, number=6) + + create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=8, message=timestamp.Timestamp,) + + network = proto.Field(proto.STRING, number=9) + + +class DeployedIndex(proto.Message): + r"""A deployment of an Index. IndexEndpoints contain one or more + DeployedIndexes. + + Attributes: + id (str): + Required. The user specified ID of the + DeployedIndex. The ID can be up to 128 + characters long and must start with a letter and + only contain letters, numbers, and underscores. + The ID must be unique within the project it is + created in. + index (str): + Required. The name of the Index this is the + deployment of. We may refer to this Index as the + DeployedIndex's "original" Index. + display_name (str): + The display name of the DeployedIndex. If not provided upon + creation, the Index's display_name is used. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the DeployedIndex + was created. 
+ private_endpoints (google.cloud.aiplatform_v1beta1.types.IndexPrivateEndpoints): + Output only. Provides paths for users to send requests + directly to the deployed index services running on Cloud via + private services access. This field is populated if + [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] + is configured. + index_sync_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The DeployedIndex may depend on various data on + its original Index. Additionally when certain changes to the + original Index are being done (e.g. when what the Index + contains is being changed) the DeployedIndex may be + asynchronously updated in the background to reflect this + changes. If this timestamp's value is at least the + [Index.update_time][google.cloud.aiplatform.v1beta1.Index.update_time] + of the original Index, it means that this DeployedIndex and + the original Index are in sync. If this timestamp is older, + then to see which updates this DeployedIndex already + contains (and which not), one must + [list][Operations.ListOperations] [Operations][Operation] + [working][Operation.name] on the original Index. Only the + successfully completed Operations with + [Operations.metadata.generic_metadata.update_time] + [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time] + equal or before this sync time are contained in this + DeployedIndex. + automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources): + Optional. A description of resources that the DeployedIndex + uses, which to large degree are decided by AI Platform, and + optionally allows only a modest additional configuration. If + min_replica_count is not set, the default value is 1. If + max_replica_count is not set, the default value is + min_replica_count. The max allowed replica count is 1000. + + The user is billed for the resources (at least their minimal + amount) even if the DeployedIndex receives no traffic. 
+ enable_access_logging (bool): + Optional. If true, private endpoint's access + logs are sent to StackDriver Logging. + These logs are like standard server access logs, + containing information like timestamp and + latency for each MatchRequest. + Note that Stackdriver logs may incur a cost, + especially if the deployed index receives a high + queries per second rate (QPS). Estimate your + costs before enabling this option. + deployed_index_auth_config (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig): + Optional. If set, the authentication is + enabled for the private endpoint. + """ + + id = proto.Field(proto.STRING, number=1) + + index = proto.Field(proto.STRING, number=2) + + display_name = proto.Field(proto.STRING, number=3) + + create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + private_endpoints = proto.Field( + proto.MESSAGE, number=5, message="IndexPrivateEndpoints", + ) + + index_sync_time = proto.Field(proto.MESSAGE, number=6, message=timestamp.Timestamp,) + + automatic_resources = proto.Field( + proto.MESSAGE, number=7, message=machine_resources.AutomaticResources, + ) + + enable_access_logging = proto.Field(proto.BOOL, number=8) + + deployed_index_auth_config = proto.Field( + proto.MESSAGE, number=9, message="DeployedIndexAuthConfig", + ) + + +class DeployedIndexAuthConfig(proto.Message): + r"""Used to set up the auth on the DeployedIndex's private + endpoint. + + Attributes: + auth_provider (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig.AuthProvider): + Defines the authentication provider that the + DeployedIndex uses. + """ + + class AuthProvider(proto.Message): + r"""Configuration for an authentication provider, including support for + `JSON Web Token + (JWT) `__. + + Attributes: + audiences (Sequence[str]): + The list of JWT + `audiences `__. + that are allowed to access. A JWT containing any of these + audiences will be accepted. 
+ allowed_issuers (Sequence[str]): + A list of allowed JWT issuers. Each entry must be a valid + Google service account, in the following format: + + ``service-account-name@project-id.iam.gserviceaccount.com`` + """ + + audiences = proto.RepeatedField(proto.STRING, number=1) + + allowed_issuers = proto.RepeatedField(proto.STRING, number=2) + + auth_provider = proto.Field(proto.MESSAGE, number=1, message=AuthProvider,) + + +class IndexPrivateEndpoints(proto.Message): + r"""IndexPrivateEndpoints proto is used to provide paths for + users to send requests via private services access. + + Attributes: + match_grpc_address (str): + Output only. The ip address used to send + match gRPC requests. + """ + + match_grpc_address = proto.Field(proto.STRING, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py new file mode 100644 index 0000000000..7ab0cf5174 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "CreateIndexEndpointRequest", + "CreateIndexEndpointOperationMetadata", + "GetIndexEndpointRequest", + "ListIndexEndpointsRequest", + "ListIndexEndpointsResponse", + "UpdateIndexEndpointRequest", + "DeleteIndexEndpointRequest", + "DeployIndexRequest", + "DeployIndexResponse", + "DeployIndexOperationMetadata", + "UndeployIndexRequest", + "UndeployIndexResponse", + "UndeployIndexOperationMetadata", + }, +) + + +class CreateIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + index_endpoint = proto.Field( + proto.MESSAGE, number=2, message=gca_index_endpoint.IndexEndpoint, + ) + + +class CreateIndexEndpointOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class GetIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] + + Attributes: + name (str): + Required. The name of the IndexEndpoint resource. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListIndexEndpointsRequest(proto.Message): + r"""Request message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the IndexEndpoints. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. An expression for filtering the results of the + request. For field names both snake_case and camelCase are + supported. + + - ``index_endpoint`` supports = and !=. ``index_endpoint`` + represents the IndexEndpoint ID, ie. the last segment of + the IndexEndpoint's + [resourcename][google.cloud.aiplatform.v1beta1.IndexEndpoint.name]. + - ``display_name`` supports =, != and regex() (uses + `re2 `__ + syntax) + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality + ``labels.key:* or labels:key - key existence A key including a space must be quoted.``\ labels."a + key"`. + + Some examples: + + - ``index_endpoint="1"`` + - ``display_name="myDisplayName"`` + - \`regex(display_name, "^A") -> The display name starts + with an A. + - ``labels.myKey="myValue"`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. 
Typically obtained + via + [ListIndexEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsResponse.next_page_token] + of the previous + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,) + + +class ListIndexEndpointsResponse(proto.Message): + r"""Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + + Attributes: + index_endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint]): + List of IndexEndpoints in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListIndexEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + index_endpoints = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_index_endpoint.IndexEndpoint, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. + + Attributes: + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint which replaces + the resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. See + `FieldMask `__. 
+ """ + + index_endpoint = proto.Field( + proto.MESSAGE, number=1, message=gca_index_endpoint.IndexEndpoint, + ) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class DeleteIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. + + Attributes: + name (str): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class DeployIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource into which + to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be created + within the IndexEndpoint. + """ + + index_endpoint = proto.Field(proto.STRING, number=1) + + deployed_index = proto.Field( + proto.MESSAGE, number=2, message=gca_index_endpoint.DeployedIndex, + ) + + +class DeployIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + Attributes: + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + The DeployedIndex that had been deployed in + the IndexEndpoint. + """ + + deployed_index = proto.Field( + proto.MESSAGE, number=1, message=gca_index_endpoint.DeployedIndex, + ) + + +class DeployIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. 
+ + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class UndeployIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource from which + to undeploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index_id (str): + Required. The ID of the DeployedIndex to be + undeployed from the IndexEndpoint. + """ + + index_endpoint = proto.Field(proto.STRING, number=1) + + deployed_index_id = proto.Field(proto.STRING, number=2) + + +class UndeployIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + """ + + +class UndeployIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/index_service.py b/google/cloud/aiplatform_v1beta1/types/index_service.py new file mode 100644 index 0000000000..123858d8ad --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/index_service.py @@ -0,0 +1,301 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import index as gca_index +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "CreateIndexRequest", + "CreateIndexOperationMetadata", + "GetIndexRequest", + "ListIndexesRequest", + "ListIndexesResponse", + "UpdateIndexRequest", + "UpdateIndexOperationMetadata", + "DeleteIndexRequest", + "NearestNeighborSearchOperationMetadata", + }, +) + + +class CreateIndexRequest(proto.Message): + r"""Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Index in. 
Format: + ``projects/{project}/locations/{location}`` + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + index = proto.Field(proto.MESSAGE, number=2, message=gca_index.Index,) + + +class CreateIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata): + The operation metadata with regard to + Matching Engine Index operation. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + nearest_neighbor_search_operation_metadata = proto.Field( + proto.MESSAGE, number=2, message="NearestNeighborSearchOperationMetadata", + ) + + +class GetIndexRequest(proto.Message): + r"""Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] + + Attributes: + name (str): + Required. The name of the Index resource. Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListIndexesRequest(proto.Message): + r"""Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the Indexes. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. 
Typically obtained via + [ListIndexesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexesResponse.next_page_token] + of the previous + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,) + + +class ListIndexesResponse(proto.Message): + r"""Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Attributes: + indexes (Sequence[google.cloud.aiplatform_v1beta1.types.Index]): + List of indexes in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListIndexesRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + indexes = proto.RepeatedField(proto.MESSAGE, number=1, message=gca_index.Index,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateIndexRequest(proto.Message): + r"""Request message for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. + + Attributes: + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index which updates the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + The update mask applies to the resource. For the + ``FieldMask`` definition, see + `FieldMask `__. 
+ """ + + index = proto.Field(proto.MESSAGE, number=1, message=gca_index.Index,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class UpdateIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata): + The operation metadata with regard to + Matching Engine Index operation. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + nearest_neighbor_search_operation_metadata = proto.Field( + proto.MESSAGE, number=2, message="NearestNeighborSearchOperationMetadata", + ) + + +class DeleteIndexRequest(proto.Message): + r"""Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. + + Attributes: + name (str): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class NearestNeighborSearchOperationMetadata(proto.Message): + r"""Runtime operation metadata with regard to Matching Engine + Index. + + Attributes: + content_validation_stats (Sequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.ContentValidationStats]): + The validation stats of the content (per file) to be + inserted or updated on the Matching Engine Index resource. + Populated if contentsDeltaUri is provided as part of + [Index.metadata][google.cloud.aiplatform.v1beta1.Index.metadata]. + Please note that, currently for those files that are broken + or has unsupported file format, we will not have the stats + for those files. 
+ """ + + class RecordError(proto.Message): + r""" + + Attributes: + error_type (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType): + The error type of this record. + error_message (str): + A human-readable message that is shown to the user to help + them fix the error. Note that this message may change from + time to time, your code should check against error_type as + the source of truth. + source_gcs_uri (str): + GCS uri pointing to the original file in + user's bucket. + embedding_id (str): + Empty if the embedding id is failed to parse. + raw_record (str): + The original content of this record. + """ + + class RecordErrorType(proto.Enum): + r"""""" + ERROR_TYPE_UNSPECIFIED = 0 + EMPTY_LINE = 1 + INVALID_JSON_SYNTAX = 2 + INVALID_CSV_SYNTAX = 3 + INVALID_AVRO_SYNTAX = 4 + INVALID_EMBEDDING_ID = 5 + EMBEDDING_SIZE_MISMATCH = 6 + NAMESPACE_MISSING = 7 + + error_type = proto.Field( + proto.ENUM, + number=1, + enum="NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType", + ) + + error_message = proto.Field(proto.STRING, number=2) + + source_gcs_uri = proto.Field(proto.STRING, number=3) + + embedding_id = proto.Field(proto.STRING, number=4) + + raw_record = proto.Field(proto.STRING, number=5) + + class ContentValidationStats(proto.Message): + r""" + + Attributes: + source_gcs_uri (str): + GCS uri pointing to the original file in + user's bucket. + valid_record_count (int): + Number of records in this file that were + successfully processed. + invalid_record_count (int): + Number of records in this file we skipped due + to validate errors. + partial_errors (Sequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError]): + The detail information of the partial + failures encountered for those invalid records + that couldn't be parsed. Up to 50 partial errors + will be reported. 
+ """ + + source_gcs_uri = proto.Field(proto.STRING, number=1) + + valid_record_count = proto.Field(proto.INT64, number=2) + + invalid_record_count = proto.Field(proto.INT64, number=3) + + partial_errors = proto.RepeatedField( + proto.MESSAGE, + number=4, + message="NearestNeighborSearchOperationMetadata.RecordError", + ) + + content_validation_stats = proto.RepeatedField( + proto.MESSAGE, number=1, message=ContentValidationStats, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/io.py b/google/cloud/aiplatform_v1beta1/types/io.py index 3a177dcf9b..e18a20b132 100644 --- a/google/cloud/aiplatform_v1beta1/types/io.py +++ b/google/cloud/aiplatform_v1beta1/types/io.py @@ -21,15 +21,41 @@ __protobuf__ = proto.module( package="google.cloud.aiplatform.v1beta1", manifest={ + "AvroSource", + "CsvSource", "GcsSource", "GcsDestination", "BigQuerySource", "BigQueryDestination", + "CsvDestination", + "TFRecordDestination", "ContainerRegistryDestination", }, ) +class AvroSource(proto.Message): + r"""The storage details for Avro input content. + + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + Required. Google Cloud Storage location. + """ + + gcs_source = proto.Field(proto.MESSAGE, number=1, message="GcsSource",) + + +class CsvSource(proto.Message): + r"""The storage details for CSV input content. + + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + Required. Google Cloud Storage location. + """ + + gcs_source = proto.Field(proto.MESSAGE, number=1, message="GcsSource",) + + class GcsSource(proto.Message): r"""The Google Cloud Storage location for the input content. @@ -95,6 +121,28 @@ class BigQueryDestination(proto.Message): output_uri = proto.Field(proto.STRING, number=1) +class CsvDestination(proto.Message): + r"""The storage details for CSV output content. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Required. 
Google Cloud Storage location. + """ + + gcs_destination = proto.Field(proto.MESSAGE, number=1, message="GcsDestination",) + + +class TFRecordDestination(proto.Message): + r"""The storage details for TFRecord output content. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Required. Google Cloud Storage location. + """ + + gcs_destination = proto.Field(proto.MESSAGE, number=1, message="GcsDestination",) + + class ContainerRegistryDestination(proto.Message): r"""The Container Registry location for the container image. diff --git a/google/cloud/aiplatform_v1beta1/types/job_service.py b/google/cloud/aiplatform_v1beta1/types/job_service.py index 514ca12f7a..778f323040 100644 --- a/google/cloud/aiplatform_v1beta1/types/job_service.py +++ b/google/cloud/aiplatform_v1beta1/types/job_service.py @@ -28,7 +28,12 @@ from google.cloud.aiplatform_v1beta1.types import ( hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) +from google.cloud.aiplatform_v1beta1.types import ( + model_deployment_monitoring_job as gca_model_deployment_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import operation from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore __protobuf__ = proto.module( @@ -58,13 +63,24 @@ "ListBatchPredictionJobsResponse", "DeleteBatchPredictionJobRequest", "CancelBatchPredictionJobRequest", + "CreateModelDeploymentMonitoringJobRequest", + "SearchModelDeploymentMonitoringStatsAnomaliesRequest", + "SearchModelDeploymentMonitoringStatsAnomaliesResponse", + "GetModelDeploymentMonitoringJobRequest", + "ListModelDeploymentMonitoringJobsRequest", + "ListModelDeploymentMonitoringJobsResponse", + "UpdateModelDeploymentMonitoringJobRequest", + "DeleteModelDeploymentMonitoringJobRequest", + "PauseModelDeploymentMonitoringJobRequest", + "ResumeModelDeploymentMonitoringJobRequest", + 
"UpdateModelDeploymentMonitoringJobOperationMetadata", }, ) class CreateCustomJobRequest(proto.Message): r"""Request message for - ``JobService.CreateCustomJob``. + [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. Attributes: parent (str): @@ -82,7 +98,7 @@ class CreateCustomJobRequest(proto.Message): class GetCustomJobRequest(proto.Message): r"""Request message for - ``JobService.GetCustomJob``. + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. Attributes: name (str): @@ -95,7 +111,7 @@ class GetCustomJobRequest(proto.Message): class ListCustomJobsRequest(proto.Message): r"""Request message for - ``JobService.ListCustomJobs``. + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. Attributes: parent (str): @@ -124,9 +140,9 @@ class ListCustomJobsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListCustomJobsResponse.next_page_token`` + [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsResponse.next_page_token] of the previous - ``JobService.ListCustomJobs`` + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -145,14 +161,14 @@ class ListCustomJobsRequest(proto.Message): class ListCustomJobsResponse(proto.Message): r"""Response message for - ``JobService.ListCustomJobs`` + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] Attributes: custom_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob]): List of CustomJobs in the requested page. next_page_token (str): A token to retrieve the next page of results. 
Pass to - ``ListCustomJobsRequest.page_token`` + [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsRequest.page_token] to obtain that page. """ @@ -169,7 +185,7 @@ def raw_page(self): class DeleteCustomJobRequest(proto.Message): r"""Request message for - ``JobService.DeleteCustomJob``. + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. Attributes: name (str): @@ -183,7 +199,7 @@ class DeleteCustomJobRequest(proto.Message): class CancelCustomJobRequest(proto.Message): r"""Request message for - ``JobService.CancelCustomJob``. + [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. Attributes: name (str): @@ -196,7 +212,7 @@ class CancelCustomJobRequest(proto.Message): class CreateDataLabelingJobRequest(proto.Message): r"""Request message for - [DataLabelingJobService.CreateDataLabelingJob][]. + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. Attributes: parent (str): @@ -214,7 +230,8 @@ class CreateDataLabelingJobRequest(proto.Message): class GetDataLabelingJobRequest(proto.Message): - r"""Request message for [DataLabelingJobService.GetDataLabelingJob][]. + r"""Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. Attributes: name (str): @@ -226,7 +243,8 @@ class GetDataLabelingJobRequest(proto.Message): class ListDataLabelingJobsRequest(proto.Message): - r"""Request message for [DataLabelingJobService.ListDataLabelingJobs][]. + r"""Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. Attributes: parent (str): @@ -281,7 +299,7 @@ class ListDataLabelingJobsRequest(proto.Message): class ListDataLabelingJobsResponse(proto.Message): r"""Response message for - ``JobService.ListDataLabelingJobs``. 
+ [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. Attributes: data_labeling_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob]): @@ -304,7 +322,7 @@ def raw_page(self): class DeleteDataLabelingJobRequest(proto.Message): r"""Request message for - ``JobService.DeleteDataLabelingJob``. + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. Attributes: name (str): @@ -318,7 +336,7 @@ class DeleteDataLabelingJobRequest(proto.Message): class CancelDataLabelingJobRequest(proto.Message): r"""Request message for - [DataLabelingJobService.CancelDataLabelingJob][]. + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. Attributes: name (str): @@ -331,7 +349,7 @@ class CancelDataLabelingJobRequest(proto.Message): class CreateHyperparameterTuningJobRequest(proto.Message): r"""Request message for - ``JobService.CreateHyperparameterTuningJob``. + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. Attributes: parent (str): @@ -354,7 +372,7 @@ class CreateHyperparameterTuningJobRequest(proto.Message): class GetHyperparameterTuningJobRequest(proto.Message): r"""Request message for - ``JobService.GetHyperparameterTuningJob``. + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. Attributes: name (str): @@ -368,7 +386,7 @@ class GetHyperparameterTuningJobRequest(proto.Message): class ListHyperparameterTuningJobsRequest(proto.Message): r"""Request message for - ``JobService.ListHyperparameterTuningJobs``. + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. Attributes: parent (str): @@ -397,9 +415,9 @@ class ListHyperparameterTuningJobsRequest(proto.Message): The standard list page size. 
page_token (str): The standard list page token. Typically obtained via - ``ListHyperparameterTuningJobsResponse.next_page_token`` + [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsResponse.next_page_token] of the previous - ``JobService.ListHyperparameterTuningJobs`` + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -418,16 +436,16 @@ class ListHyperparameterTuningJobsRequest(proto.Message): class ListHyperparameterTuningJobsResponse(proto.Message): r"""Response message for - ``JobService.ListHyperparameterTuningJobs`` + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] Attributes: hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob]): List of HyperparameterTuningJobs in the requested page. - ``HyperparameterTuningJob.trials`` + [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials] of the jobs will be not be returned. next_page_token (str): A token to retrieve the next page of results. Pass to - ``ListHyperparameterTuningJobsRequest.page_token`` + [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsRequest.page_token] to obtain that page. """ @@ -446,7 +464,7 @@ def raw_page(self): class DeleteHyperparameterTuningJobRequest(proto.Message): r"""Request message for - ``JobService.DeleteHyperparameterTuningJob``. + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. 
Attributes: name (str): @@ -460,7 +478,7 @@ class DeleteHyperparameterTuningJobRequest(proto.Message): class CancelHyperparameterTuningJobRequest(proto.Message): r"""Request message for - ``JobService.CancelHyperparameterTuningJob``. + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. Attributes: name (str): @@ -474,7 +492,7 @@ class CancelHyperparameterTuningJobRequest(proto.Message): class CreateBatchPredictionJobRequest(proto.Message): r"""Request message for - ``JobService.CreateBatchPredictionJob``. + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. Attributes: parent (str): @@ -494,7 +512,7 @@ class CreateBatchPredictionJobRequest(proto.Message): class GetBatchPredictionJobRequest(proto.Message): r"""Request message for - ``JobService.GetBatchPredictionJob``. + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. Attributes: name (str): @@ -508,7 +526,7 @@ class GetBatchPredictionJobRequest(proto.Message): class ListBatchPredictionJobsRequest(proto.Message): r"""Request message for - ``JobService.ListBatchPredictionJobs``. + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. Attributes: parent (str): @@ -539,9 +557,9 @@ class ListBatchPredictionJobsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListBatchPredictionJobsResponse.next_page_token`` + [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsResponse.next_page_token] of the previous - ``JobService.ListBatchPredictionJobs`` + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. 
@@ -560,7 +578,7 @@ class ListBatchPredictionJobsRequest(proto.Message): class ListBatchPredictionJobsResponse(proto.Message): r"""Response message for - ``JobService.ListBatchPredictionJobs`` + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] Attributes: batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.BatchPredictionJob]): @@ -568,7 +586,7 @@ class ListBatchPredictionJobsResponse(proto.Message): page. next_page_token (str): A token to retrieve the next page of results. Pass to - ``ListBatchPredictionJobsRequest.page_token`` + [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsRequest.page_token] to obtain that page. """ @@ -585,7 +603,7 @@ def raw_page(self): class DeleteBatchPredictionJobRequest(proto.Message): r"""Request message for - ``JobService.DeleteBatchPredictionJob``. + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. Attributes: name (str): @@ -599,7 +617,7 @@ class DeleteBatchPredictionJobRequest(proto.Message): class CancelBatchPredictionJobRequest(proto.Message): r"""Request message for - ``JobService.CancelBatchPredictionJob``. + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. Attributes: name (str): @@ -611,4 +629,281 @@ class CancelBatchPredictionJobRequest(proto.Message): name = proto.Field(proto.STRING, number=1) +class CreateModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. + + Attributes: + parent (str): + Required. The parent of the ModelDeploymentMonitoringJob. + Format: ``projects/{project}/locations/{location}`` + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. 
The ModelDeploymentMonitoringJob to + create + """ + + parent = proto.Field(proto.STRING, number=1) + + model_deployment_monitoring_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + + +class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): + r"""Request message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Attributes: + model_deployment_monitoring_job (str): + Required. ModelDeploymentMonitoring Job resource name. + Format: + \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} + deployed_model_id (str): + Required. The DeployedModel ID of the + [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + feature_display_name (str): + The feature display name. If specified, only return the + stats belonging to this feature. Format: + [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name][google.cloud.aiplatform.v1beta1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name], + example: "user_destination". + objectives (Sequence[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest.StatsAnomaliesObjective]): + Required. Objectives of the stats to + retrieve. + page_size (int): + The standard list page size. + page_token (str): + A page token received from a previous + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] + call. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The earliest timestamp of stats being + generated. If not set, indicates fetching stats + till the earliest possible one. 
+ end_time (google.protobuf.timestamp_pb2.Timestamp): + The latest timestamp of stats being + generated. If not set, indicates feching stats + till the latest possible one. + """ + + class StatsAnomaliesObjective(proto.Message): + r"""Stats requested for specific objective. + + Attributes: + type_ (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType): + + top_feature_count (int): + If set, all attribution scores between + [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time] + and + [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time] + are fetched, and page token doesn't take affect in this + case. Only used to retrieve attribution score for the top + Features which has the highest attribution score in the + latest monitoring run. + """ + + type_ = proto.Field( + proto.ENUM, + number=1, + enum=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringObjectiveType, + ) + + top_feature_count = proto.Field(proto.INT32, number=4) + + model_deployment_monitoring_job = proto.Field(proto.STRING, number=1) + + deployed_model_id = proto.Field(proto.STRING, number=2) + + feature_display_name = proto.Field(proto.STRING, number=3) + + objectives = proto.RepeatedField( + proto.MESSAGE, number=4, message=StatsAnomaliesObjective, + ) + + page_size = proto.Field(proto.INT32, number=5) + + page_token = proto.Field(proto.STRING, number=6) + + start_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=8, message=timestamp.Timestamp,) + + +class SearchModelDeploymentMonitoringStatsAnomaliesResponse(proto.Message): + r"""Response message for + 
[JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Attributes: + monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsAnomalies]): + Stats retrieved for requested objectives. There are at most + 1000 + [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats][google.cloud.aiplatform.v1beta1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats] + in the response. + next_page_token (str): + The page token that can be used by the next + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] + call. + """ + + @property + def raw_page(self): + return self + + monitoring_stats = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class GetModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListModelDeploymentMonitoringJobsRequest(proto.Message): + r"""Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Attributes: + parent (str): + Required. The parent of the ModelDeploymentMonitoringJob. + Format: ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. 
+ page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,) + + +class ListModelDeploymentMonitoringJobsResponse(proto.Message): + r"""Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Attributes: + model_deployment_monitoring_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob]): + A list of ModelDeploymentMonitoringJobs that + matches the specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + model_deployment_monitoring_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + + Attributes: + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. The model monitoring configuration + which replaces the resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the + resource. 
+ """ + + model_deployment_monitoring_job = proto.Field( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class DeleteModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the model monitoring job to + delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class PauseModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to pause. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ResumeModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to resume. 
Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class UpdateModelDeploymentMonitoringJobOperationMetadata(proto.Message): + r"""Runtime operation information for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py b/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py new file mode 100644 index 0000000000..f4ff6b2d97 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"LineageSubgraph",}, +) + + +class LineageSubgraph(proto.Message): + r"""A subgraph of the overall lineage graph. Event edges connect + Artifact and Execution nodes. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): + The Artifact nodes in the subgraph. + executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]): + The Execution nodes in the subgraph. + events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): + The Event edges between Artifacts and + Executions in the subgraph. + """ + + artifacts = proto.RepeatedField(proto.MESSAGE, number=1, message=artifact.Artifact,) + + executions = proto.RepeatedField( + proto.MESSAGE, number=2, message=execution.Execution, + ) + + events = proto.RepeatedField(proto.MESSAGE, number=3, message=event.Event,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/google/cloud/aiplatform_v1beta1/types/machine_resources.py index c791354c58..d06e10f16e 100644 --- a/google/cloud/aiplatform_v1beta1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1beta1/types/machine_resources.py @@ -49,17 +49,17 @@ class MachineSpec(proto.Message): see https://tinyurl.com/aip-docs/training/configure-compute. For - ``DeployedModel`` + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] this field is optional, and the default value is ``n1-standard-2``. For - ``BatchPredictionJob`` + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] or as part of - ``WorkerPoolSpec`` + [WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec] this field is required. 
accelerator_type (google.cloud.aiplatform_v1beta1.types.AcceleratorType): Immutable. The type of accelerator(s) that may be attached to the machine as per - ``accelerator_count``. + [accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]. accelerator_count (int): The number of accelerators to attach to the machine. @@ -89,10 +89,10 @@ class DedicatedResources(proto.Message): against it increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. Note: if - ``machine_spec.accelerator_count`` + [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] is above 0, currently the model will be always deployed precisely on - ``min_replica_count``. + [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count]. max_replica_count (int): Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If @@ -103,7 +103,7 @@ class DedicatedResources(proto.Message): beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use - ``min_replica_count`` + [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count] as the default value. autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1beta1.types.AutoscalingMetricSpec]): Immutable. The metric specifications that overrides a @@ -112,7 +112,7 @@ class DedicatedResources(proto.Message): set). At most one entry is allowed per metric. 
If - ``machine_spec.accelerator_count`` + [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics and scale up when either metrics exceeds its target value while scale @@ -120,18 +120,18 @@ class DedicatedResources(proto.Message): default target value is 60 for both metrics. If - ``machine_spec.accelerator_count`` + [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] is 0, the autoscaling will be based on CPU utilization metric only with default target value 60 if not explicitly set. For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set - ``autoscaling_metric_specs.metric_name`` + [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.metric_name] to ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` and - ``autoscaling_metric_specs.target`` + [autoscaling_metric_specs.target][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.target] to ``80``. """ @@ -157,7 +157,7 @@ class AutomaticResources(proto.Message): Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to - ``max_replica_count``, + [max_replica_count][google.cloud.aiplatform.v1beta1.AutomaticResources.max_replica_count], and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. @@ -196,7 +196,7 @@ class BatchDedicatedResources(proto.Message): Immutable. The number of machine replicas used at the start of the batch operation. 
If not set, AI Platform decides starting number, not greater than - ``max_replica_count`` + [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count] max_replica_count (int): Immutable. The maximum number of machine replicas the batch operation may be scaled to. diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_schema.py b/google/cloud/aiplatform_v1beta1/types/metadata_schema.py new file mode 100644 index 0000000000..d2c6f97fa8 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/metadata_schema.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"MetadataSchema",}, +) + + +class MetadataSchema(proto.Message): + r"""Instance of a general MetadataSchema. + + Attributes: + name (str): + Output only. The resource name of the + MetadataSchema. + schema_version (str): + The version of the MetadataSchema. The version's format must + match the following regular expression: + ``^[0-9]+[.][0-9]+[.][0-9]+$``, which would allow to + order/compare different versions.Example: 1.0.0, 1.0.1, etc. + schema (str): + Required. The raw YAML string representation of the + MetadataSchema. 
The combination of [MetadataSchema.version] + and the schema name given by ``title`` in + [MetadataSchema.schema] must be unique within a + MetadataStore. + + The schema is defined as an OpenAPI 3.0.2 `MetadataSchema + Object `__ + schema_type (google.cloud.aiplatform_v1beta1.types.MetadataSchema.MetadataSchemaType): + The type of the MetadataSchema. This is a + property that identifies which metadata types + will use the MetadataSchema. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataSchema was created. + description (str): + Description of the Metadata Schema + """ + + class MetadataSchemaType(proto.Enum): + r"""Describes the type of the MetadataSchema.""" + METADATA_SCHEMA_TYPE_UNSPECIFIED = 0 + ARTIFACT_TYPE = 1 + EXECUTION_TYPE = 2 + CONTEXT_TYPE = 3 + + name = proto.Field(proto.STRING, number=1) + + schema_version = proto.Field(proto.STRING, number=2) + + schema = proto.Field(proto.STRING, number=3) + + schema_type = proto.Field(proto.ENUM, number=4, enum=MetadataSchemaType,) + + create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + description = proto.Field(proto.STRING, number=6) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_service.py b/google/cloud/aiplatform_v1beta1/types/metadata_service.py new file mode 100644 index 0000000000..3d755b3415 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/metadata_service.py @@ -0,0 +1,971 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "CreateMetadataStoreRequest", + "CreateMetadataStoreOperationMetadata", + "GetMetadataStoreRequest", + "ListMetadataStoresRequest", + "ListMetadataStoresResponse", + "DeleteMetadataStoreRequest", + "DeleteMetadataStoreOperationMetadata", + "CreateArtifactRequest", + "GetArtifactRequest", + "ListArtifactsRequest", + "ListArtifactsResponse", + "UpdateArtifactRequest", + "CreateContextRequest", + "GetContextRequest", + "ListContextsRequest", + "ListContextsResponse", + "UpdateContextRequest", + "DeleteContextRequest", + "AddContextArtifactsAndExecutionsRequest", + "AddContextArtifactsAndExecutionsResponse", + "AddContextChildrenRequest", + "AddContextChildrenResponse", + "QueryContextLineageSubgraphRequest", + "CreateExecutionRequest", + "GetExecutionRequest", + "ListExecutionsRequest", + "ListExecutionsResponse", + "UpdateExecutionRequest", + 
"AddExecutionEventsRequest", + "AddExecutionEventsResponse", + "QueryExecutionInputsAndOutputsRequest", + "CreateMetadataSchemaRequest", + "GetMetadataSchemaRequest", + "ListMetadataSchemasRequest", + "ListMetadataSchemasResponse", + "QueryArtifactLineageSubgraphRequest", + }, +) + + +class CreateMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + + Attributes: + parent (str): + Required. The resource name of the Location + where the MetadataStore should be created. + Format: projects/{project}/locations/{location}/ + metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): + Required. The MetadataStore to create. + metadata_store_id (str): + The {metadatastore} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be unique + across all MetadataStores in the parent Location. (Otherwise + the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the preexisting + MetadataStore.) + """ + + parent = proto.Field(proto.STRING, number=1) + + metadata_store = proto.Field( + proto.MESSAGE, number=2, message=gca_metadata_store.MetadataStore, + ) + + metadata_store_id = proto.Field(proto.STRING, number=3) + + +class CreateMetadataStoreOperationMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for creating a + MetadataStore. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class GetMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. + + Attributes: + name (str): + Required. The resource name of the + MetadataStore to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListMetadataStoresRequest(proto.Message): + r"""Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Attributes: + parent (str): + Required. The Location whose MetadataStores + should be listed. Format: + projects/{project}/locations/{location} + page_size (int): + The maximum number of Metadata Stores to + return. The service may return fewer. + Must be in range 1-1000, inclusive. Defaults to + 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListMetadataStoresResponse(proto.Message): + r"""Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Attributes: + metadata_stores (Sequence[google.cloud.aiplatform_v1beta1.types.MetadataStore]): + The MetadataStores found for the Location. 
+ next_page_token (str): + A token, which can be sent as + [ListMetadataStoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataStoresRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + metadata_stores = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_metadata_store.MetadataStore, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class DeleteMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + + Attributes: + name (str): + Required. The resource name of the + MetadataStore to delete. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + force (bool): + If set to true, any child resources of this MetadataStore + will be deleted. (Otherwise, the request will fail with a + FAILED_PRECONDITION error if the MetadataStore has any child + resources.) + """ + + name = proto.Field(proto.STRING, number=1) + + force = proto.Field(proto.BOOL, number=2) + + +class DeleteMetadataStoreOperationMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for deleting a + MetadataStore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class CreateArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. + + Attributes: + parent (str): + Required. The resource name of the + MetadataStore where the Artifact should be + created. 
Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact to create. + artifact_id (str): + The {artifact} portion of the resource name with the format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + If not provided, the Artifact's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are /[a-z][0-9]-/. Must be unique across all + Artifacts in the parent MetadataStore. (Otherwise the + request will fail with ALREADY_EXISTS, or PERMISSION_DENIED + if the caller can't view the preexisting Artifact.) + """ + + parent = proto.Field(proto.STRING, number=1) + + artifact = proto.Field(proto.MESSAGE, number=2, message=gca_artifact.Artifact,) + + artifact_id = proto.Field(proto.STRING, number=3) + + +class GetArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. + + Attributes: + name (str): + Required. The resource name of the Artifact + to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListArtifactsRequest(proto.Message): + r"""Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Attributes: + parent (str): + Required. The MetadataStore whose Artifacts + should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + page_size (int): + The maximum number of Artifacts to return. + The service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts] + call. 
Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Artifacts to + satisfy in order to be part of the result set. The syntax to + define filter query is based on https://google.aip.dev/160. + The supported set of filters include the following: + + 1. Attributes filtering e.g. display_name = "test" + + Supported fields include: name, display_name, uri, state, + schema_title, create_time and update_time. Time fields, + i.e. create_time and update_time, require values to + specified in RFC-3339 format. e.g. create_time = + "2020-11-19T11:30:00-04:00" + + 2. Metadata field To filter on metadata fields use traversal + operation as follows: metadata.. + e.g. metadata.field_1.number_value = 10.0 + + 3. Context based filtering To filter Artifacts based on the + contexts to which they belong use the function operator + with the full resource name "in_context()" e.g. + in_context("projects//locations//metadataStores//contexts/") + + Each of the above supported filter types can be combined + together using Logical operators (AND & OR). e.g. + display_name = "test" AND metadata.field1.bool_value = true. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + filter = proto.Field(proto.STRING, number=4) + + +class ListArtifactsResponse(proto.Message): + r"""Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): + The Artifacts retrieved from the + MetadataStore. 
+ next_page_token (str): + A token, which can be sent as + [ListArtifactsRequest.page_token][google.cloud.aiplatform.v1beta1.ListArtifactsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + artifacts = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_artifact.Artifact, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. + + Attributes: + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact containing updates. The Artifact's + [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating which fields + should be updated. + allow_missing (bool): + If set to true, and the + [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is not + found, a new + [Artifact][google.cloud.aiplatform.v1beta1.Artifact] will be + created. In this situation, ``update_mask`` is ignored. + """ + + artifact = proto.Field(proto.MESSAGE, number=1, message=gca_artifact.Artifact,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + allow_missing = proto.Field(proto.BOOL, number=3) + + +class CreateContextRequest(proto.Message): + r"""Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. + + Attributes: + parent (str): + Required. The resource name of the + MetadataStore where the Context should be + created. 
Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context to create. + context_id (str): + The {context} portion of the resource name with the format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + If not provided, the Context's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are /[a-z][0-9]-/. Must be unique across all + Contexts in the parent MetadataStore. (Otherwise the request + will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the + caller can't view the preexisting Context.) + """ + + parent = proto.Field(proto.STRING, number=1) + + context = proto.Field(proto.MESSAGE, number=2, message=gca_context.Context,) + + context_id = proto.Field(proto.STRING, number=3) + + +class GetContextRequest(proto.Message): + r"""Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. + + Attributes: + name (str): + Required. The resource name of the Context to + retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListContextsRequest(proto.Message): + r"""Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + + Attributes: + parent (str): + Required. The MetadataStore whose Contexts + should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + page_size (int): + The maximum number of Contexts to return. The + service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + call. Provide this to retrieve the subsequent page. 
+ + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + filter = proto.Field(proto.STRING, number=4) + + +class ListContextsResponse(proto.Message): + r"""Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. + + Attributes: + contexts (Sequence[google.cloud.aiplatform_v1beta1.types.Context]): + The Contexts retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListContextsRequest.page_token][google.cloud.aiplatform.v1beta1.ListContextsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + contexts = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_context.Context, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateContextRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. + + Attributes: + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1beta1.Context.name] + field is used to identify the Context to be updated. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating which fields + should be updated. 
+ allow_missing (bool): + If set to true, and the + [Context][google.cloud.aiplatform.v1beta1.Context] is not + found, a new + [Context][google.cloud.aiplatform.v1beta1.Context] will be + created. In this situation, ``update_mask`` is ignored. + """ + + context = proto.Field(proto.MESSAGE, number=1, message=gca_context.Context,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + allow_missing = proto.Field(proto.BOOL, number=3) + + +class DeleteContextRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. + + Attributes: + name (str): + Required. The resource name of the Context to + retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + force (bool): + If set to true, any child resources of this Context will be + deleted. (Otherwise, the request will fail with a + FAILED_PRECONDITION error if the Context has any child + resources, such as another Context, Artifact, or Execution). + """ + + name = proto.Field(proto.STRING, number=1) + + force = proto.Field(proto.BOOL, number=2) + + +class AddContextArtifactsAndExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + + Attributes: + context (str): + Required. The resource name of the Context + that the Artifacts and Executions belong to. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + artifacts (Sequence[str]): + The resource names of the Artifacts to + attribute to the Context. + executions (Sequence[str]): + The resource names of the Executions to + associate with the Context. 
+ """ + + context = proto.Field(proto.STRING, number=1) + + artifacts = proto.RepeatedField(proto.STRING, number=2) + + executions = proto.RepeatedField(proto.STRING, number=3) + + +class AddContextArtifactsAndExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + """ + + +class AddContextChildrenRequest(proto.Message): + r"""Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + + Attributes: + context (str): + Required. The resource name of the parent + Context. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + child_contexts (Sequence[str]): + The resource names of the child Contexts. + """ + + context = proto.Field(proto.STRING, number=1) + + child_contexts = proto.RepeatedField(proto.STRING, number=2) + + +class AddContextChildrenResponse(proto.Message): + r"""Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + """ + + +class QueryContextLineageSubgraphRequest(proto.Message): + r"""Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. + + Attributes: + context (str): + Required. The resource name of the Context whose Artifacts + and Executions should be retrieved as a LineageSubgraph. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + The request may error with FAILED_PRECONDITION if the number + of Artifacts, the number of Executions, or the number of + Events that would be returned for the Context exceeds 1000. 
+ """ + + context = proto.Field(proto.STRING, number=1) + + +class CreateExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. + + Attributes: + parent (str): + Required. The resource name of the + MetadataStore where the Execution should be + created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution to create. + execution_id (str): + The {execution} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + If not provided, the Execution's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are /[a-z][0-9]-/. Must be unique across all + Executions in the parent MetadataStore. (Otherwise the + request will fail with ALREADY_EXISTS, or PERMISSION_DENIED + if the caller can't view the preexisting Execution.) + """ + + parent = proto.Field(proto.STRING, number=1) + + execution = proto.Field(proto.MESSAGE, number=2, message=gca_execution.Execution,) + + execution_id = proto.Field(proto.STRING, number=3) + + +class GetExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. + + Attributes: + name (str): + Required. The resource name of the Execution + to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Attributes: + parent (str): + Required. The MetadataStore whose Executions + should be listed. 
Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + page_size (int): + The maximum number of Executions to return. + The service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Executions + to satisfy in order to be part of the result set. The syntax + to define filter query is based on + https://google.aip.dev/160. Following are the supported set + of filters: + + 1. Attributes filtering e.g. display_name = "test" + + supported fields include: name, display_name, state, + schema_title, create_time and update_time. Time fields, + i.e. create_time and update_time, require values to + specified in RFC-3339 format. e.g. create_time = + "2020-11-19T11:30:00-04:00" + + 2. Metadata field To filter on metadata fields use traversal + operation as follows: metadata.. + e.g. metadata.field_1.number_value = 10.0 + + 3. Context based filtering To filter Executions based on the + contexts to which they belong use the function operator + with the full resource name "in_context()" e.g. + in_context("projects//locations//metadataStores//contexts/") + + Each of the above supported filters can be combined together + using Logical operators (AND & OR). e.g. display_name = + "test" AND metadata.field1.bool_value = true. 
+ """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + filter = proto.Field(proto.STRING, number=4) + + +class ListExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Attributes: + executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]): + The Executions retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListExecutionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListExecutionsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + executions = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_execution.Execution, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. + + Attributes: + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution containing updates. The Execution's + [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] + field is used to identify the Execution to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating which fields + should be updated. + allow_missing (bool): + If set to true, and the + [Execution][google.cloud.aiplatform.v1beta1.Execution] is + not found, a new + [Execution][google.cloud.aiplatform.v1beta1.Execution] will + be created. In this situation, ``update_mask`` is ignored. 
+ """ + + execution = proto.Field(proto.MESSAGE, number=1, message=gca_execution.Execution,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + allow_missing = proto.Field(proto.BOOL, number=3) + + +class AddExecutionEventsRequest(proto.Message): + r"""Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + + Attributes: + execution (str): + Required. The resource name of the Execution + that the Events connect Artifacts with. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): + The Events to create and add. + """ + + execution = proto.Field(proto.STRING, number=1) + + events = proto.RepeatedField(proto.MESSAGE, number=2, message=event.Event,) + + +class AddExecutionEventsResponse(proto.Message): + r"""Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + """ + + +class QueryExecutionInputsAndOutputsRequest(proto.Message): + r"""Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. + + Attributes: + execution (str): + Required. The resource name of the Execution + whose input and output Artifacts should be + retrieved as a LineageSubgraph. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + """ + + execution = proto.Field(proto.STRING, number=1) + + +class CreateMetadataSchemaRequest(proto.Message): + r"""Request message for + [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. + + Attributes: + parent (str): + Required. The resource name of the + MetadataStore where the MetadataSchema should be + created. 
Format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}
+        metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema):
+            Required. The MetadataSchema to create.
+        metadata_schema_id (str):
+            The {metadata_schema} portion of the resource name with the
+            format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}
+            If not provided, the MetadataSchema's ID will be a UUID
+            generated by the service. Must be 4-128 characters in
+            length. Valid characters are /[a-z][0-9]-/. Must be unique
+            across all MetadataSchemas in the parent Location.
+            (Otherwise the request will fail with ALREADY_EXISTS, or
+            PERMISSION_DENIED if the caller can't view the preexisting
+            MetadataSchema.)
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    metadata_schema = proto.Field(
+        proto.MESSAGE, number=2, message=gca_metadata_schema.MetadataSchema,
+    )
+
+    metadata_schema_id = proto.Field(proto.STRING, number=3)
+
+
+class GetMetadataSchemaRequest(proto.Message):
+    r"""Request message for
+    [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema].
+
+    Attributes:
+        name (str):
+            Required. The resource name of the
+            MetadataSchema to retrieve. Format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+
+class ListMetadataSchemasRequest(proto.Message):
+    r"""Request message for
+    [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas].
+
+    Attributes:
+        parent (str):
+            Required. The MetadataStore whose
+            MetadataSchemas should be listed. Format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}
+        page_size (int):
+            The maximum number of MetadataSchemas to
+            return. The service may return fewer.
+            Must be in range 1-1000, inclusive. Defaults to
+            100.
+ page_token (str): + A page token, received from a previous + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + A query to filter available MetadataSchemas + for matching results. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + filter = proto.Field(proto.STRING, number=4) + + +class ListMetadataSchemasResponse(proto.Message): + r"""Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + + Attributes: + metadata_schemas (Sequence[google.cloud.aiplatform_v1beta1.types.MetadataSchema]): + The MetadataSchemas found for the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListMetadataSchemasRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataSchemasRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + metadata_schemas = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_metadata_schema.MetadataSchema, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class QueryArtifactLineageSubgraphRequest(proto.Message): + r"""Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. + + Attributes: + artifact (str): + Required. The resource name of the Artifact whose Lineage + needs to be retrieved as a LineageSubgraph. 
Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + The request may error with FAILED_PRECONDITION if the number + of Artifacts, the number of Executions, or the number of + Events that would be returned for the Context exceeds 1000. + max_hops (int): + Specifies the size of the lineage graph in terms of number + of hops from the specified artifact. Negative Value: + INVALID_ARGUMENT error is returned 0: Only input artifact is + returned. No value: Transitive closure is performed to + return the complete graph. + filter (str): + Filter specifying the boolean condition for the Artifacts to + satisfy in order to be part of the Lineage Subgraph. The + syntax to define filter query is based on + https://google.aip.dev/160. The supported set of filters + include the following: + + 1. Attributes filtering e.g. display_name = "test" + + supported fields include: name, display_name, uri, state, + schema_title, create_time and update_time. Time fields, + i.e. create_time and update_time, require values to + specified in RFC-3339 format. e.g. create_time = + "2020-11-19T11:30:00-04:00" + + 2. Metadata field To filter on metadata fields use traversal + operation as follows: metadata.. + e.g. metadata.field_1.number_value = 10.0 + + Each of the above supported filter types can be combined + together using Logical operators (AND & OR). e.g. + display_name = "test" AND metadata.field1.bool_value = true. 
+ """ + + artifact = proto.Field(proto.STRING, number=1) + + max_hops = proto.Field(proto.INT32, number=2) + + filter = proto.Field(proto.STRING, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_store.py b/google/cloud/aiplatform_v1beta1/types/metadata_store.py new file mode 100644 index 0000000000..b57c00573a --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/metadata_store.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"MetadataStore",}, +) + + +class MetadataStore(proto.Message): + r"""Instance of a metadata store. Contains a set of metadata that + can be queried. + + Attributes: + name (str): + Output only. The resource name of the + MetadataStore instance. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataStore was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataStore was last updated. 
+ encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for an + Metadata Store. If set, this Metadata Store and + all sub-resources of this Metadata Store will be + secured by this key. + description (str): + Description of the MetadataStore. + state (google.cloud.aiplatform_v1beta1.types.MetadataStore.MetadataStoreState): + Output only. State information of the + MetadataStore. + """ + + class MetadataStoreState(proto.Message): + r"""Represent state information for a MetadataStore. + + Attributes: + disk_utilization_bytes (int): + The disk utilization of the MetadataStore in + bytes. + """ + + disk_utilization_bytes = proto.Field(proto.INT64, number=1) + + name = proto.Field(proto.STRING, number=1) + + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + encryption_spec = proto.Field( + proto.MESSAGE, number=5, message=gca_encryption_spec.EncryptionSpec, + ) + + description = proto.Field(proto.STRING, number=6) + + state = proto.Field(proto.MESSAGE, number=7, message=MetadataStoreState,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/migration_service.py b/google/cloud/aiplatform_v1beta1/types/migration_service.py index de4c9466f6..f189abc783 100644 --- a/google/cloud/aiplatform_v1beta1/types/migration_service.py +++ b/google/cloud/aiplatform_v1beta1/types/migration_service.py @@ -41,7 +41,7 @@ class SearchMigratableResourcesRequest(proto.Message): r"""Request message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. Attributes: parent (str): @@ -56,21 +56,25 @@ class SearchMigratableResourcesRequest(proto.Message): page_token (str): The standard page token. 
filter (str): - Supported filters are: + A filter for your search. You can use the following types of + filters: - - Resource type: For a specific type of MigratableResource. + - Resource type filters. The following strings filter for a + specific type of + [MigratableResource][google.cloud.aiplatform.v1beta1.MigratableResource]: - ``ml_engine_model_version:*`` - - ``automl_model:*``, + - ``automl_model:*`` - ``automl_dataset:*`` - - ``data_labeling_dataset:*``. + - ``data_labeling_dataset:*`` - - Migrated or not: Filter migrated resource or not by - last_migrate_time. + - "Migrated or not" filters. The following strings filter + for resources that either have or have not already been + migrated: - - ``last_migrate_time:*`` will filter migrated + - ``last_migrate_time:*`` filters for migrated resources. - - ``NOT last_migrate_time:*`` will filter not yet + - ``NOT last_migrate_time:*`` filters for not yet migrated resources. """ @@ -85,7 +89,7 @@ class SearchMigratableResourcesRequest(proto.Message): class SearchMigratableResourcesResponse(proto.Message): r"""Response message for - ``MigrationService.SearchMigratableResources``. + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. Attributes: migratable_resources (Sequence[google.cloud.aiplatform_v1beta1.types.MigratableResource]): @@ -110,7 +114,7 @@ def raw_page(self): class BatchMigrateResourcesRequest(proto.Message): r"""Request message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. Attributes: parent (str): @@ -287,7 +291,7 @@ class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message): class BatchMigrateResourcesResponse(proto.Message): r"""Response message for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. 
Attributes: migrate_resource_responses (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceResponse]): @@ -324,7 +328,7 @@ class MigrateResourceResponse(proto.Message): class BatchMigrateResourcesOperationMetadata(proto.Message): r"""Runtime operation information for - ``MigrationService.BatchMigrateResources``. + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): @@ -336,7 +340,7 @@ class BatchMigrateResourcesOperationMetadata(proto.Message): class PartialResult(proto.Message): r"""Represents a partial result in batch migration operation for one - ``MigrateResourceRequest``. + [MigrateResourceRequest][google.cloud.aiplatform.v1beta1.MigrateResourceRequest]. Attributes: error (google.rpc.status_pb2.Status): diff --git a/google/cloud/aiplatform_v1beta1/types/model.py b/google/cloud/aiplatform_v1beta1/types/model.py index 4dcf6baefa..8608621480 100644 --- a/google/cloud/aiplatform_v1beta1/types/model.py +++ b/google/cloud/aiplatform_v1beta1/types/model.py @@ -47,9 +47,9 @@ class Model(proto.Message): predict_schemata (google.cloud.aiplatform_v1beta1.types.PredictSchemata): The schemata that describe formats of the Model's predictions and explanations as given and returned via - ``PredictionService.Predict`` + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] and - ``PredictionService.Explain``. + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. metadata_schema_uri (str): Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, @@ -66,7 +66,7 @@ class Model(proto.Message): metadata (google.protobuf.struct_pb2.Value): Immutable. An additional information about the Model; the schema of the metadata can be found in - ``metadata_schema``. 
+ [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri]. Unset if the Model does not have any additional information. supported_export_formats (Sequence[google.cloud.aiplatform_v1beta1.types.Model.ExportFormat]): Output only. The formats in which this Model @@ -80,7 +80,7 @@ class Model(proto.Message): Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon - ``ModelService.UploadModel``, + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], and all binaries it contains are copied and stored internally by AI Platform. Not present for AutoML Models. artifact_uri (str): @@ -91,73 +91,73 @@ class Model(proto.Message): Output only. When this Model is deployed, its prediction resources are described by the ``prediction_resources`` field of the - ``Endpoint.deployed_models`` + [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an - ``Endpoint`` and + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and does not support online predictions - (``PredictionService.Predict`` + ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or - ``PredictionService.Explain``). + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]). Such a Model can serve predictions by using a - ``BatchPredictionJob``, + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob], if it has at least one entry each in - ``supported_input_storage_formats`` + [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] and - ``supported_output_storage_formats``. 
+ [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. supported_input_storage_formats (Sequence[str]): Output only. The formats this Model supports in - ``BatchPredictionJob.input_config``. + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If - ``PredictSchemata.instance_schema_uri`` + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] exists, the instances should be given as per that schema. The possible formats are: - ``jsonl`` The JSON Lines format, where each instance is a single line. Uses - ``GcsSource``. + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - ``csv`` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses - ``GcsSource``. + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - ``tf-record`` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses - ``GcsSource``. + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - ``tf-record-gzip`` Similar to ``tf-record``, but the file is gzipped. Uses - ``GcsSource``. + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - ``bigquery`` Each instance is a single row in BigQuery. Uses - ``BigQuerySource``. + [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source]. - ``file-list`` Each line of the file is the location of an instance to process, uses ``gcs_source`` field of the - ``InputConfig`` + [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig] object. If this Model doesn't support any of these formats it means it cannot be used with a - ``BatchPredictionJob``. 
+ [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. However, if it has - ``supported_deployment_resources_types``, + [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], it could serve online predictions by using - ``PredictionService.Predict`` + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or - ``PredictionService.Explain``. + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. supported_output_storage_formats (Sequence[str]): Output only. The formats this Model supports in - ``BatchPredictionJob.output_config``. + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. If both - ``PredictSchemata.instance_schema_uri`` + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] and - ``PredictSchemata.prediction_schema_uri`` + [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri] exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction @@ -167,27 +167,27 @@ class Model(proto.Message): - ``jsonl`` The JSON Lines format, where each prediction is a single line. Uses - ``GcsDestination``. + [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. - ``csv`` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses - ``GcsDestination``. + [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. 
- ``bigquery`` Each prediction is a single row in a BigQuery table, uses - ``BigQueryDestination`` + [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination] . If this Model doesn't support any of these formats it means it cannot be used with a - ``BatchPredictionJob``. + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. However, if it has - ``supported_deployment_resources_types``, + [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], it could serve online predictions by using - ``PredictionService.Predict`` + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or - ``PredictionService.Explain``. + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this Model was uploaded into AI Platform. @@ -204,32 +204,32 @@ class Model(proto.Message): The Model can be used for [requesting explanation][PredictionService.Explain] after being - ``deployed`` + [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] if it is populated. The Model can be used for [batch explanation][BatchPredictionJob.generate_explanation] if it is populated. All fields of the explanation_spec can be overridden by - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of - ``DeployModelRequest.deployed_model``, + [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model], or - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] of - ``BatchPredictionJob``. + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. 
If the default explanation specification is not set for this Model, this Model can still be used for [requesting explanation][PredictionService.Explain] by setting - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of - ``DeployModelRequest.deployed_model`` + [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model] and for [batch explanation][BatchPredictionJob.generate_explanation] by setting - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] of - ``BatchPredictionJob``. + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. etag (str): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update @@ -353,20 +353,20 @@ class ExportableContent(proto.Enum): class PredictSchemata(proto.Message): r"""Contains the schemata used in Model's predictions and explanations via - ``PredictionService.Predict``, - ``PredictionService.Explain`` + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict], + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] and - ``BatchPredictionJob``. + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. Attributes: instance_schema_uri (str): Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in - ``PredictRequest.instances``, - ``ExplainRequest.instances`` + [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances], + [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] and - ``BatchPredictionJob.input_config``. + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. 
AutoML Models always have this field populated by AI @@ -378,10 +378,10 @@ class PredictSchemata(proto.Message): Immutable. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via - ``PredictRequest.parameters``, - ``ExplainRequest.parameters`` + [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters], + [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters] and - ``BatchPredictionJob.model_parameters``. + [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters]. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. AutoML Models always have this field populated by AI @@ -394,10 +394,10 @@ class PredictSchemata(proto.Message): Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via - ``PredictResponse.predictions``, - ``ExplainResponse.explanations``, + [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions], + [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations], and - ``BatchPredictionJob.output_config``. + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. AutoML Models always have this field populated by AI @@ -415,8 +415,9 @@ class PredictSchemata(proto.Message): class ModelContainerSpec(proto.Message): - r"""Specification of a container for serving predictions. This message - is a subset of the Kubernetes Container v1 core + r"""Specification of a container for serving predictions. Some fields in + this message correspond to fields in the Kubernetes Container v1 + core `specification `__. Attributes: @@ -430,7 +431,7 @@ class ModelContainerSpec(proto.Message): `here `__. 
The container image is ingested upon - ``ModelService.UploadModel``, + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], stored internally, and this original path is afterwards not used. @@ -452,7 +453,7 @@ class ModelContainerSpec(proto.Message): If you do not specify this field, then the container's ``ENTRYPOINT`` runs, in conjunction with the - ``args`` + [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] field or the container's ```CMD`` `__, if either exists. If this field is not specified and the @@ -472,7 +473,7 @@ class ModelContainerSpec(proto.Message): by AI Platform `__ and environment variables set in the - ``env`` + [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: @@ -492,7 +493,7 @@ class ModelContainerSpec(proto.Message): similar to a Docker ``CMD``'s "default parameters" form. If you don't specify this field but do specify the - ``command`` + [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] field, then the command from the ``command`` field runs without any additional arguments. See the `Kubernetes documentation `__ about how @@ -510,7 +511,7 @@ class ModelContainerSpec(proto.Message): by AI Platform `__ and environment variables set in the - ``env`` + [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: @@ -528,9 +529,9 @@ class ModelContainerSpec(proto.Message): in the container can read these environment variables. 
Additionally, the - ``command`` + [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] and - ``args`` + [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] fields can reference these variables. Later entries in this list can also reference earlier entries. For example, the following example sets the variable ``VAR_2`` to have the @@ -581,7 +582,7 @@ class ModelContainerSpec(proto.Message): predict_route (str): Immutable. HTTP path on the container to send prediction requests to. AI Platform forwards requests sent using - ``projects.locations.endpoints.predict`` + [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] to this path on the container's IP address and port. AI Platform then returns the container's response in the API response. @@ -591,7 +592,7 @@ class ModelContainerSpec(proto.Message): request body in a POST request to the ``/foo`` path on the port of your container specified by the first value of this ``ModelContainerSpec``'s - ``ports`` + [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field. If you don't specify this field, it defaults to the @@ -608,7 +609,7 @@ class ModelContainerSpec(proto.Message): environment variable.) - DEPLOYED_MODEL: - ``DeployedModel.id`` + [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the ``DeployedModel``. (AI Platform makes this value available to your container code as the ```AIP_DEPLOYED_MODEL_ID`` environment @@ -624,7 +625,7 @@ class ModelContainerSpec(proto.Message): Platform intermittently sends a GET request to the ``/bar`` path on the port of your container specified by the first value of this ``ModelContainerSpec``'s - ``ports`` + [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field. If you don't specify this field, it defaults to the @@ -641,7 +642,7 @@ class ModelContainerSpec(proto.Message): environment variable.) 
- DEPLOYED_MODEL: - ``DeployedModel.id`` + [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the ``DeployedModel``. (AI Platform makes this value available to your container code as the ```AIP_DEPLOYED_MODEL_ID`` `__ diff --git a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py new file mode 100644 index 0000000000..e5f19dd3b7 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -0,0 +1,354 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import model_monitoring +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ModelDeploymentMonitoringObjectiveType", + "ModelDeploymentMonitoringJob", + "ModelDeploymentMonitoringBigQueryTable", + "ModelDeploymentMonitoringObjectiveConfig", + "ModelDeploymentMonitoringScheduleConfig", + "ModelMonitoringStatsAnomalies", + }, +) + + +class ModelDeploymentMonitoringObjectiveType(proto.Enum): + r"""The Model Monitoring Objective types.""" + MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED = 0 + RAW_FEATURE_SKEW = 1 + RAW_FEATURE_DRIFT = 2 + FEATURE_ATTRIBUTION_SKEW = 3 + FEATURE_ATTRIBUTION_DRIFT = 4 + + +class ModelDeploymentMonitoringJob(proto.Message): + r"""Represents a job that runs periodically to monitor the + deployed models in an endpoint. It will analyze the logged + training & prediction data to detect any abnormal behaviors. + + Attributes: + name (str): + Output only. Resource name of a + ModelDeploymentMonitoringJob. + display_name (str): + Required. The user-defined name of the + ModelDeploymentMonitoringJob. The name can be up + to 128 characters long and can be consist of any + UTF-8 characters. + Display name of a ModelDeploymentMonitoringJob. + endpoint (str): + Required. Endpoint resource name. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + state (google.cloud.aiplatform_v1beta1.types.JobState): + Output only. The detailed state of the + monitoring job. 
When the job is still creating, + the state will be 'PENDING'. Once the job is + successfully created, the state will be + 'RUNNING'. Pause the job, the state will be + 'PAUSED'. + Resume the job, the state will return to + 'RUNNING'. + schedule_state (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.MonitoringScheduleState): + Output only. Schedule state when the + monitoring job is in Running state. + model_deployment_monitoring_objective_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveConfig]): + Required. The config for monitoring + objectives. This is a per DeployedModel config. + Each DeployedModel needs to be configed + separately. + model_deployment_monitoring_schedule_config (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringScheduleConfig): + Required. Schedule config for running the + monitoring job. + logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy): + Required. Sample Strategy for logging. + model_monitoring_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig): + Alert config for model monitoring. + predict_instance_schema_uri (str): + YAML schema file uri describing the format of + a single instance, which are given to format + this Endpoint's prediction (and explanation). If + not set, we will generate predict schema from + collected predict requests. + sample_predict_instance (google.protobuf.struct_pb2.Value): + Sample Predict instance, same format as + [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances], + this can be set as a replacement of + [ModelDeploymentMonitoringJob.predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri]. + If not set, we will generate predict schema from collected + predict requests. 
+ analysis_instance_schema_uri (str): + YAML schema file uri describing the format of a single + instance that you want Tensorflow Data Validation (TFDV) to + analyze. + + If this field is empty, all the feature data types are + inferred from + [predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri], + meaning that TFDV will use the data in the exact format(data + type) as prediction request/response. If there are any data + type differences between predict instance and TFDV instance, + this field can be used to override the schema. For models + trained with AI Platform, this field must be set as all the + fields in predict instance formatted as string. + bigquery_tables (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable]): + Output only. The created bigquery tables for + the job under customer project. Customer could + do their own query & analysis. There could be 4 + log tables in maximum: + 1. Training data logging predict + request/response 2. Serving data logging predict + request/response + log_ttl (google.protobuf.duration_pb2.Duration): + The TTL of BigQuery tables in user projects + which stores logs. A day is the basic unit of + the TTL and we take the ceil of TTL/86400(a + day). e.g. { second: 3600} indicates ttl = 1 + day. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.LabelsEntry]): + The labels with user-defined metadata to + organize your ModelDeploymentMonitoringJob. + + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelDeploymentMonitoringJob was created. 
+ update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelDeploymentMonitoringJob was updated most + recently. + next_schedule_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this monitoring + pipeline will be scheduled to run for the next + round. + stats_anomalies_base_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Stats anomalies base folder path. + """ + + class MonitoringScheduleState(proto.Enum): + r"""The state to Specify the monitoring pipeline.""" + MONITORING_SCHEDULE_STATE_UNSPECIFIED = 0 + PENDING = 1 + OFFLINE = 2 + RUNNING = 3 + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + endpoint = proto.Field(proto.STRING, number=3) + + state = proto.Field(proto.ENUM, number=4, enum=job_state.JobState,) + + schedule_state = proto.Field(proto.ENUM, number=5, enum=MonitoringScheduleState,) + + model_deployment_monitoring_objective_configs = proto.RepeatedField( + proto.MESSAGE, number=6, message="ModelDeploymentMonitoringObjectiveConfig", + ) + + model_deployment_monitoring_schedule_config = proto.Field( + proto.MESSAGE, number=7, message="ModelDeploymentMonitoringScheduleConfig", + ) + + logging_sampling_strategy = proto.Field( + proto.MESSAGE, number=8, message=model_monitoring.SamplingStrategy, + ) + + model_monitoring_alert_config = proto.Field( + proto.MESSAGE, number=15, message=model_monitoring.ModelMonitoringAlertConfig, + ) + + predict_instance_schema_uri = proto.Field(proto.STRING, number=9) + + sample_predict_instance = proto.Field( + proto.MESSAGE, number=19, message=struct.Value, + ) + + analysis_instance_schema_uri = proto.Field(proto.STRING, number=16) + + bigquery_tables = proto.RepeatedField( + proto.MESSAGE, number=10, message="ModelDeploymentMonitoringBigQueryTable", + ) + + log_ttl = proto.Field(proto.MESSAGE, number=17, message=duration.Duration,) + + labels = proto.MapField(proto.STRING, 
proto.STRING, number=11) + + create_time = proto.Field(proto.MESSAGE, number=12, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=13, message=timestamp.Timestamp,) + + next_schedule_time = proto.Field( + proto.MESSAGE, number=14, message=timestamp.Timestamp, + ) + + stats_anomalies_base_directory = proto.Field( + proto.MESSAGE, number=20, message=io.GcsDestination, + ) + + +class ModelDeploymentMonitoringBigQueryTable(proto.Message): + r"""ModelDeploymentMonitoringBigQueryTable specifies the BigQuery + table name as well as some information of the logs stored in + this table. + + Attributes: + log_source (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable.LogSource): + The source of log. + log_type (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable.LogType): + The type of log. + bigquery_table_path (str): + The created BigQuery table to store logs. Customer could do + their own query & analysis. Format: + ``bq://.model_deployment_monitoring_._`` + """ + + class LogSource(proto.Enum): + r"""Indicates where does the log come from.""" + LOG_SOURCE_UNSPECIFIED = 0 + TRAINING = 1 + SERVING = 2 + + class LogType(proto.Enum): + r"""Indicates what type of traffic does the log belong to.""" + LOG_TYPE_UNSPECIFIED = 0 + PREDICT = 1 + EXPLAIN = 2 + + log_source = proto.Field(proto.ENUM, number=1, enum=LogSource,) + + log_type = proto.Field(proto.ENUM, number=2, enum=LogType,) + + bigquery_table_path = proto.Field(proto.STRING, number=3) + + +class ModelDeploymentMonitoringObjectiveConfig(proto.Message): + r"""ModelDeploymentMonitoringObjectiveConfig contains the pair of + deployed_model_id to ModelMonitoringObjectiveConfig. + + Attributes: + deployed_model_id (str): + The DeployedModel ID of the objective config. + objective_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig): + The objective config of for the + modelmonitoring job of this deployed model. 
+ """ + + deployed_model_id = proto.Field(proto.STRING, number=1) + + objective_config = proto.Field( + proto.MESSAGE, + number=2, + message=model_monitoring.ModelMonitoringObjectiveConfig, + ) + + +class ModelDeploymentMonitoringScheduleConfig(proto.Message): + r"""The config for scheduling monitoring job. + + Attributes: + monitor_interval (google.protobuf.duration_pb2.Duration): + Required. The model monitoring job running + interval. It will be rounded up to next full + hour. + """ + + monitor_interval = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,) + + +class ModelMonitoringStatsAnomalies(proto.Message): + r"""Statistics and anomalies generated by Model Monitoring. + + Attributes: + objective (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType): + Model Monitoring Objective those stats and + anomalies belonging to. + deployed_model_id (str): + Deployed Model ID. + anomaly_count (int): + Number of anomalies within all stats. + feature_stats (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies]): + A list of historical Stats and Anomalies + generated for all Features. + """ + + class FeatureHistoricStatsAnomalies(proto.Message): + r"""Historical Stats (and Anomalies) for a specific Feature. + + Attributes: + feature_display_name (str): + Display Name of the Feature. + threshold (google.cloud.aiplatform_v1beta1.types.ThresholdConfig): + Threshold for anomaly detection. + training_stats (google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly): + Stats calculated for the Training Dataset. + prediction_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]): + A list of historical stats generated by + different time window's Prediction Dataset. 
+ """ + + feature_display_name = proto.Field(proto.STRING, number=1) + + threshold = proto.Field( + proto.MESSAGE, number=3, message=model_monitoring.ThresholdConfig, + ) + + training_stats = proto.Field( + proto.MESSAGE, + number=4, + message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + + prediction_stats = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + + objective = proto.Field( + proto.ENUM, number=1, enum="ModelDeploymentMonitoringObjectiveType", + ) + + deployed_model_id = proto.Field(proto.STRING, number=2) + + anomaly_count = proto.Field(proto.INT32, number=3) + + feature_stats = proto.RepeatedField( + proto.MESSAGE, number=4, message=FeatureHistoricStatsAnomalies, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py index 391bc38cf4..661241eb26 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py @@ -40,23 +40,23 @@ class ModelEvaluation(proto.Message): metrics_schema_uri (str): Output only. Points to a YAML file stored on Google Cloud Storage describing the - ``metrics`` + [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics] of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. metrics (google.protobuf.struct_pb2.Value): Output only. Evaluation metrics of the Model. The schema of the metrics is stored in - ``metrics_schema_uri`` + [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics_schema_uri] create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this ModelEvaluation was created. slice_dimensions (Sequence[str]): Output only. All possible - ``dimensions`` of + [dimensions][ModelEvaluationSlice.slice.dimension] of ModelEvaluationSlices. 
The dimensions can be used as the filter of the - ``ModelService.ListModelEvaluationSlices`` + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] request, in the form of ``slice.dimension = ``. model_explanation (google.cloud.aiplatform_v1beta1.types.ModelExplanation): Output only. Aggregated explanation metrics @@ -67,7 +67,7 @@ class ModelEvaluation(proto.Message): Models. explanation_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation.ModelEvaluationExplanationSpec]): Output only. Describes the values of - ``ExplanationSpec`` + [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] that are used for explaining the predicted values on the evaluated data. """ diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py index 2d66e29a9f..ef15398bd7 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py @@ -42,14 +42,14 @@ class ModelEvaluationSlice(proto.Message): metrics_schema_uri (str): Output only. Points to a YAML file stored on Google Cloud Storage describing the - ``metrics`` + [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics] of this ModelEvaluationSlice. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. metrics (google.protobuf.struct_pb2.Value): Output only. Sliced evaluation metrics of the Model. The schema of the metrics is stored in - ``metrics_schema_uri`` + [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics_schema_uri] create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this ModelEvaluationSlice was created. 
@@ -65,9 +65,9 @@ class Slice(proto.Message): - ``annotationSpec``: This slice is on the test data that has either ground truth or prediction with - ``AnnotationSpec.display_name`` + [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name] equals to - ``value``. + [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value]. value (str): Output only. The value of the dimension in this slice. diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring.py new file mode 100644 index 0000000000..fd605d8265 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import io + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ModelMonitoringObjectiveConfig", + "ModelMonitoringAlertConfig", + "ThresholdConfig", + "SamplingStrategy", + }, +) + + +class ModelMonitoringObjectiveConfig(proto.Message): + r"""Next ID: 6 + + Attributes: + training_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingDataset): + Training dataset for models. This field has + to be set only if + TrainingPredictionSkewDetectionConfig is + specified. 
+ training_prediction_skew_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig): + The config for skew between training data and + prediction data. + prediction_drift_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig): + The config for drift of prediction data. + """ + + class TrainingDataset(proto.Message): + r"""Training Dataset information. + + Attributes: + dataset (str): + The resource name of the Dataset used to + train this Model. + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + The Google Cloud Storage uri of the unmanaged + Dataset used to train this Model. + bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): + The BigQuery table of the unmanaged Dataset + used to train this Model. + data_format (str): + Data format of the dataset, only applicable + if the input is from Google Cloud Storage. + The possible formats are: + + "tf-record" + The source file is a TFRecord file. + + "csv" + The source file is a CSV file. + target_field (str): + The target field name the model is to + predict. This field will be excluded when doing + Predict and (or) Explain for the training data. + logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy): + Strategy to sample data from Training + Dataset. If not set, we process the whole + dataset. 
+ """ + + dataset = proto.Field(proto.STRING, number=3, oneof="data_source") + + gcs_source = proto.Field( + proto.MESSAGE, number=4, oneof="data_source", message=io.GcsSource, + ) + + bigquery_source = proto.Field( + proto.MESSAGE, number=5, oneof="data_source", message=io.BigQuerySource, + ) + + data_format = proto.Field(proto.STRING, number=2) + + target_field = proto.Field(proto.STRING, number=6) + + logging_sampling_strategy = proto.Field( + proto.MESSAGE, number=7, message="SamplingStrategy", + ) + + class TrainingPredictionSkewDetectionConfig(proto.Message): + r"""The config for Training & Prediction data skew detection. It + specifies the training dataset sources and the skew detection + parameters. + + Attributes: + skew_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.SkewThresholdsEntry]): + Key is the feature name and value is the + threshold. If a feature needs to be monitored + for skew, a value threshold must be configed for + that feature. The threshold here is against + feature distribution distance between the + training and prediction feature. + """ + + skew_thresholds = proto.MapField( + proto.STRING, proto.MESSAGE, number=1, message="ThresholdConfig", + ) + + class PredictionDriftDetectionConfig(proto.Message): + r"""The config for Prediction data drift detection. + + Attributes: + drift_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]): + Key is the feature name and value is the + threshold. If a feature needs to be monitored + for drift, a value threshold must be configed + for that feature. The threshold here is against + feature distribution distance between different + time windws. 
+ """ + + drift_thresholds = proto.MapField( + proto.STRING, proto.MESSAGE, number=1, message="ThresholdConfig", + ) + + training_dataset = proto.Field(proto.MESSAGE, number=1, message=TrainingDataset,) + + training_prediction_skew_detection_config = proto.Field( + proto.MESSAGE, number=2, message=TrainingPredictionSkewDetectionConfig, + ) + + prediction_drift_detection_config = proto.Field( + proto.MESSAGE, number=3, message=PredictionDriftDetectionConfig, + ) + + +class ModelMonitoringAlertConfig(proto.Message): + r"""Next ID: 2 + + Attributes: + email_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig.EmailAlertConfig): + Email alert config. + """ + + class EmailAlertConfig(proto.Message): + r"""The config for email alert. + + Attributes: + user_emails (Sequence[str]): + The email addresses to send the alert. + """ + + user_emails = proto.RepeatedField(proto.STRING, number=1) + + email_alert_config = proto.Field( + proto.MESSAGE, number=1, oneof="alert", message=EmailAlertConfig, + ) + + +class ThresholdConfig(proto.Message): + r"""The config for feature monitoring threshold. + Next ID: 3 + + Attributes: + value (float): + Specify a threshold value that can trigger + the alert. If this threshold config is for + feature distribution distance: 1. For + categorical feature, the distribution distance + is calculated by L-inifinity norm. + 2. For numerical feature, the distribution + distance is calculated by Jensen–Shannon + divergence. + Each feature must have a non-zero threshold if + they need to be monitored. Otherwise no alert + will be triggered for that feature. + """ + + value = proto.Field(proto.DOUBLE, number=1, oneof="threshold") + + +class SamplingStrategy(proto.Message): + r"""Sampling Strategy for logging, can be for both training and + prediction dataset. + Next ID: 2 + + Attributes: + random_sample_config (google.cloud.aiplatform_v1beta1.types.SamplingStrategy.RandomSampleConfig): + Random sample config. 
Will support more + sampling strategies later. + """ + + class RandomSampleConfig(proto.Message): + r"""Requests are randomly selected. + + Attributes: + sample_rate (float): + Sample rate (0, 1] + """ + + sample_rate = proto.Field(proto.DOUBLE, number=1) + + random_sample_config = proto.Field( + proto.MESSAGE, number=1, message=RandomSampleConfig, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/model_service.py b/google/cloud/aiplatform_v1beta1/types/model_service.py index e0d8e148ab..be2f1aae6e 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_service.py @@ -52,7 +52,7 @@ class UploadModelRequest(proto.Message): r"""Request message for - ``ModelService.UploadModel``. + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. Attributes: parent (str): @@ -70,7 +70,7 @@ class UploadModelRequest(proto.Message): class UploadModelOperationMetadata(proto.Message): r"""Details of - ``ModelService.UploadModel`` + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] operation. Attributes: @@ -85,7 +85,7 @@ class UploadModelOperationMetadata(proto.Message): class UploadModelResponse(proto.Message): r"""Response message of - ``ModelService.UploadModel`` + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] operation. Attributes: @@ -99,7 +99,7 @@ class UploadModelResponse(proto.Message): class GetModelRequest(proto.Message): r"""Request message for - ``ModelService.GetModel``. + [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. Attributes: name (str): @@ -112,7 +112,7 @@ class GetModelRequest(proto.Message): class ListModelsRequest(proto.Message): r"""Request message for - ``ModelService.ListModels``. + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. 
Attributes: parent (str): @@ -143,9 +143,9 @@ class ListModelsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListModelsResponse.next_page_token`` + [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelsResponse.next_page_token] of the previous - ``ModelService.ListModels`` + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -164,14 +164,14 @@ class ListModelsRequest(proto.Message): class ListModelsResponse(proto.Message): r"""Response message for - ``ModelService.ListModels`` + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] Attributes: models (Sequence[google.cloud.aiplatform_v1beta1.types.Model]): List of Models in the requested page. next_page_token (str): A token to retrieve next page of results. Pass to - ``ListModelsRequest.page_token`` + [ListModelsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelsRequest.page_token] to obtain that page. """ @@ -186,7 +186,7 @@ def raw_page(self): class UpdateModelRequest(proto.Message): r"""Request message for - ``ModelService.UpdateModel``. + [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. Attributes: model (google.cloud.aiplatform_v1beta1.types.Model): @@ -205,7 +205,7 @@ class UpdateModelRequest(proto.Message): class DeleteModelRequest(proto.Message): r"""Request message for - ``ModelService.DeleteModel``. + [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. Attributes: name (str): @@ -219,7 +219,7 @@ class DeleteModelRequest(proto.Message): class ExportModelRequest(proto.Message): r"""Request message for - ``ModelService.ExportModel``. + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. 
Attributes: name (str): @@ -276,7 +276,7 @@ class OutputConfig(proto.Message): class ExportModelOperationMetadata(proto.Message): r"""Details of - ``ModelService.ExportModel`` + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] operation. Attributes: @@ -289,7 +289,7 @@ class ExportModelOperationMetadata(proto.Message): class OutputInfo(proto.Message): r"""Further describes the output of the ExportModel. Supplements - ``ExportModelRequest.OutputConfig``. + [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1beta1.ExportModelRequest.OutputConfig]. Attributes: artifact_output_uri (str): @@ -317,14 +317,14 @@ class OutputInfo(proto.Message): class ExportModelResponse(proto.Message): r"""Response message of - ``ModelService.ExportModel`` + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] operation. """ class GetModelEvaluationRequest(proto.Message): r"""Request message for - ``ModelService.GetModelEvaluation``. + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. Attributes: name (str): @@ -337,7 +337,7 @@ class GetModelEvaluationRequest(proto.Message): class ListModelEvaluationsRequest(proto.Message): r"""Request message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. Attributes: parent (str): @@ -350,9 +350,9 @@ class ListModelEvaluationsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListModelEvaluationsResponse.next_page_token`` + [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsResponse.next_page_token] of the previous - ``ModelService.ListModelEvaluations`` + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations] call. 
read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -371,7 +371,7 @@ class ListModelEvaluationsRequest(proto.Message): class ListModelEvaluationsResponse(proto.Message): r"""Response message for - ``ModelService.ListModelEvaluations``. + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. Attributes: model_evaluations (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation]): @@ -379,7 +379,7 @@ class ListModelEvaluationsResponse(proto.Message): page. next_page_token (str): A token to retrieve next page of results. Pass to - ``ListModelEvaluationsRequest.page_token`` + [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsRequest.page_token] to obtain that page. """ @@ -396,7 +396,7 @@ def raw_page(self): class GetModelEvaluationSliceRequest(proto.Message): r"""Request message for - ``ModelService.GetModelEvaluationSlice``. + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. Attributes: name (str): @@ -410,7 +410,7 @@ class GetModelEvaluationSliceRequest(proto.Message): class ListModelEvaluationSlicesRequest(proto.Message): r"""Request message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. Attributes: parent (str): @@ -425,9 +425,9 @@ class ListModelEvaluationSlicesRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. 
Typically obtained via - ``ListModelEvaluationSlicesResponse.next_page_token`` + [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesResponse.next_page_token] of the previous - ``ModelService.ListModelEvaluationSlices`` + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -446,7 +446,7 @@ class ListModelEvaluationSlicesRequest(proto.Message): class ListModelEvaluationSlicesResponse(proto.Message): r"""Response message for - ``ModelService.ListModelEvaluationSlices``. + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. Attributes: model_evaluation_slices (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice]): @@ -454,7 +454,7 @@ class ListModelEvaluationSlicesResponse(proto.Message): page. next_page_token (str): A token to retrieve next page of results. Pass to - ``ListModelEvaluationSlicesRequest.page_token`` + [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesRequest.page_token] to obtain that page. """ diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py new file mode 100644 index 0000000000..db6eb5c5bc --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py @@ -0,0 +1,353 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import pipeline_state +from google.cloud.aiplatform_v1beta1.types import value as gca_value +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.rpc import status_pb2 as status # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "PipelineJob", + "PipelineJobDetail", + "PipelineTaskDetail", + "PipelineTaskExecutorDetail", + }, +) + + +class PipelineJob(proto.Message): + r"""An instance of a machine learning PipelineJob. + + Attributes: + name (str): + Output only. The resource name of the + PipelineJob. + display_name (str): + The display name of the Pipeline. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline creation time. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline end time. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. 
Timestamp when this PipelineJob + was most recently updated. + pipeline_spec (google.protobuf.struct_pb2.Struct): + Required. The spec of the pipeline. The spec contains a + ``schema_version`` field which indicates the Kubeflow + Pipeline schema version to decode the struct. + state (google.cloud.aiplatform_v1beta1.types.PipelineState): + Output only. The detailed state of the job. + job_detail (google.cloud.aiplatform_v1beta1.types.PipelineJobDetail): + Output only. The details of pipeline run. Not + available in the list view. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + pipeline execution. Only populated when the + pipeline's state is FAILED or CANCELLED. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.LabelsEntry]): + The labels with user-defined metadata to + organize PipelineJob. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + runtime_config (google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig): + Runtime config of the pipeline. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + pipelineJob. If set, this PipelineJob and all of + its sub-resources will be secured by this key. + service_account (str): + The service account that the pipeline workload runs as. If + not specified, the Compute Engine default service account in + the project will be used. See + https://cloud.google.com/compute/docs/access/service-accounts#default_service_account + + Users starting the pipeline must have the + ``iam.serviceAccounts.actAs`` permission on this service + account. + network (str): + The full name of the Compute Engine + `network `__ + to which the Pipeline Job's workload should be peered. 
For + example, ``projects/12345/global/networks/myVPC``. + `Format `__ + is of the form + ``projects/{project}/global/networks/{network}``. Where + {project} is a project number, as in ``12345``, and + {network} is a network name. + + Private services access must already be configured for the + network. Pipeline job will apply the network configuration + to the GCP resources being launched, if applied, such as + Cloud AI Platform Training or Dataflow job. If left + unspecified, the workload is not peered with any network. + """ + + class RuntimeConfig(proto.Message): + r"""The runtime config of a PipelineJob. + + Attributes: + parameters (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig.ParametersEntry]): + The runtime parameters of the PipelineJob. The parameters + will be passed into + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] + to replace the placeholders at runtime. + gcs_output_directory (str): + Required. A path in a Cloud Storage bucket, which will be + treated as the root output directory of the pipeline. It is + used by the system to generate the paths of output + artifacts. The artifact paths are generated with a sub-path + pattern ``{job_id}/{task_id}/{output_key}`` under the + specified output directory. The service account specified in + this pipeline must have the ``storage.objects.get`` and + ``storage.objects.create`` permissions for this bucket. 
+ """ + + parameters = proto.MapField( + proto.STRING, proto.MESSAGE, number=1, message=gca_value.Value, + ) + + gcs_output_directory = proto.Field(proto.STRING, number=2) + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp.Timestamp,) + + pipeline_spec = proto.Field(proto.MESSAGE, number=7, message=struct.Struct,) + + state = proto.Field(proto.ENUM, number=8, enum=pipeline_state.PipelineState,) + + job_detail = proto.Field(proto.MESSAGE, number=9, message="PipelineJobDetail",) + + error = proto.Field(proto.MESSAGE, number=10, message=status.Status,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=11) + + runtime_config = proto.Field(proto.MESSAGE, number=12, message=RuntimeConfig,) + + encryption_spec = proto.Field( + proto.MESSAGE, number=16, message=gca_encryption_spec.EncryptionSpec, + ) + + service_account = proto.Field(proto.STRING, number=17) + + network = proto.Field(proto.STRING, number=18) + + +class PipelineJobDetail(proto.Message): + r"""The runtime detail of PipelineJob. + + Attributes: + pipeline_context (google.cloud.aiplatform_v1beta1.types.Context): + Output only. The context of the pipeline. + pipeline_run_context (google.cloud.aiplatform_v1beta1.types.Context): + Output only. The context of the current + pipeline run. + task_details (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail]): + Output only. The runtime details of the tasks + under the pipeline. 
+ """ + + pipeline_context = proto.Field(proto.MESSAGE, number=1, message=context.Context,) + + pipeline_run_context = proto.Field( + proto.MESSAGE, number=2, message=context.Context, + ) + + task_details = proto.RepeatedField( + proto.MESSAGE, number=3, message="PipelineTaskDetail", + ) + + +class PipelineTaskDetail(proto.Message): + r"""The runtime detail of a task execution. + + Attributes: + task_id (int): + Output only. The system generated ID of the + task. + parent_task_id (int): + Output only. The id of the parent task if the + task is within a component scope. Empty if the + task is at the root level. + task_name (str): + Output only. The user specified name of the task that is + defined in [PipelineJob.spec][]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task create time. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task end time. + executor_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail): + Output only. The detailed execution info. + state (google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.State): + Output only. State of the task. + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Output only. The execution metadata of the + task. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + task execution. Only populated when the task's + state is FAILED or CANCELLED. + inputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.InputsEntry]): + Output only. The runtime input artifacts of + the task. + outputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.OutputsEntry]): + Output only. The runtime output artifacts of + the task. 
+ """ + + class State(proto.Enum): + r"""Specifies state of TaskExecution""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + RUNNING = 2 + SUCCEEDED = 3 + CANCEL_PENDING = 4 + CANCELLING = 5 + CANCELLED = 6 + FAILED = 7 + SKIPPED = 8 + NOT_TRIGGERED = 9 + + class ArtifactList(proto.Message): + r"""A list of artifact metadata. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): + Output only. A list of artifact metadata. + """ + + artifacts = proto.RepeatedField( + proto.MESSAGE, number=1, message=artifact.Artifact, + ) + + task_id = proto.Field(proto.INT64, number=1) + + parent_task_id = proto.Field(proto.INT64, number=12) + + task_name = proto.Field(proto.STRING, number=2) + + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + executor_detail = proto.Field( + proto.MESSAGE, number=6, message="PipelineTaskExecutorDetail", + ) + + state = proto.Field(proto.ENUM, number=7, enum=State,) + + execution = proto.Field(proto.MESSAGE, number=8, message=gca_execution.Execution,) + + error = proto.Field(proto.MESSAGE, number=9, message=status.Status,) + + inputs = proto.MapField( + proto.STRING, proto.MESSAGE, number=10, message=ArtifactList, + ) + + outputs = proto.MapField( + proto.STRING, proto.MESSAGE, number=11, message=ArtifactList, + ) + + +class PipelineTaskExecutorDetail(proto.Message): + r"""The runtime detail of a pipeline executor. + + Attributes: + container_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.ContainerDetail): + Output only. The detailed info for a + container executor. + custom_job_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.CustomJobDetail): + Output only. The detailed info for a custom + job executor. 
+ """ + + class ContainerDetail(proto.Message): + r"""The detail of a container execution. It contains the job + names of the lifecycle of a container execution. + + Attributes: + main_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for + the main container execution. + pre_caching_check_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for + the pre-caching-check container execution. This job will be + available if the + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] + specifies the ``pre_caching_check`` hook in the lifecycle + events. + """ + + main_job = proto.Field(proto.STRING, number=1) + + pre_caching_check_job = proto.Field(proto.STRING, number=2) + + class CustomJobDetail(proto.Message): + r"""The detailed info for a custom job executor. + + Attributes: + job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob]. 
+ """ + + job = proto.Field(proto.STRING, number=1) + + container_detail = proto.Field( + proto.MESSAGE, number=1, oneof="details", message=ContainerDetail, + ) + + custom_job_detail = proto.Field( + proto.MESSAGE, number=2, oneof="details", message=CustomJobDetail, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py index b06361dfa9..ce51990e4d 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -18,6 +18,7 @@ import proto # type: ignore +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import ( training_pipeline as gca_training_pipeline, ) @@ -33,13 +34,19 @@ "ListTrainingPipelinesResponse", "DeleteTrainingPipelineRequest", "CancelTrainingPipelineRequest", + "CreatePipelineJobRequest", + "GetPipelineJobRequest", + "ListPipelineJobsRequest", + "ListPipelineJobsResponse", + "DeletePipelineJobRequest", + "CancelPipelineJobRequest", }, ) class CreateTrainingPipelineRequest(proto.Message): r"""Request message for - ``PipelineService.CreateTrainingPipeline``. + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. Attributes: parent (str): @@ -59,7 +66,7 @@ class CreateTrainingPipelineRequest(proto.Message): class GetTrainingPipelineRequest(proto.Message): r"""Request message for - ``PipelineService.GetTrainingPipeline``. + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. Attributes: name (str): @@ -72,7 +79,7 @@ class GetTrainingPipelineRequest(proto.Message): class ListTrainingPipelinesRequest(proto.Message): r"""Request message for - ``PipelineService.ListTrainingPipelines``. 
+ [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. Attributes: parent (str): @@ -99,9 +106,9 @@ class ListTrainingPipelinesRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - ``ListTrainingPipelinesResponse.next_page_token`` + [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesResponse.next_page_token] of the previous - ``PipelineService.ListTrainingPipelines`` + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] call. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -120,7 +127,7 @@ class ListTrainingPipelinesRequest(proto.Message): class ListTrainingPipelinesResponse(proto.Message): r"""Response message for - ``PipelineService.ListTrainingPipelines`` + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] Attributes: training_pipelines (Sequence[google.cloud.aiplatform_v1beta1.types.TrainingPipeline]): @@ -128,7 +135,7 @@ class ListTrainingPipelinesResponse(proto.Message): page. next_page_token (str): A token to retrieve the next page of results. Pass to - ``ListTrainingPipelinesRequest.page_token`` + [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesRequest.page_token] to obtain that page. """ @@ -145,7 +152,7 @@ def raw_page(self): class DeleteTrainingPipelineRequest(proto.Message): r"""Request message for - ``PipelineService.DeleteTrainingPipeline``. + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. 
Attributes: name (str): @@ -159,7 +166,7 @@ class DeleteTrainingPipelineRequest(proto.Message): class CancelTrainingPipelineRequest(proto.Message): r"""Request message for - ``PipelineService.CancelTrainingPipeline``. + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. Attributes: name (str): @@ -171,4 +178,137 @@ class CancelTrainingPipelineRequest(proto.Message): name = proto.Field(proto.STRING, number=1) +class CreatePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob): + Required. The PipelineJob to create. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not provided, an + ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + """ + + parent = proto.Field(proto.STRING, number=1) + + pipeline_job = proto.Field( + proto.MESSAGE, number=2, message=gca_pipeline_job.PipelineJob, + ) + + pipeline_job_id = proto.Field(proto.STRING, number=3) + + +class GetPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListPipelineJobsRequest(proto.Message): + r"""Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. 
+ + Attributes: + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. Supported fields: + + - ``display_name`` supports = and !=. + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` + - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` + - ``NOT display_name="my_pipeline"`` + - ``state="PIPELINE_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsResponse.next_page_token] + of the previous + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + call. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + +class ListPipelineJobsResponse(proto.Message): + r"""Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + + Attributes: + pipeline_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob]): + List of PipelineJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsRequest.page_token] + to obtain that page. 
+ """ + + @property + def raw_page(self): + return self + + pipeline_jobs = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_pipeline_job.PipelineJob, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class DeletePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class CancelPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field(proto.STRING, number=1) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index f7abe9e3e2..14eaa6b8fd 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -35,7 +35,7 @@ class PredictRequest(proto.Message): r"""Request message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. Attributes: endpoint (str): @@ -53,14 +53,14 @@ class PredictRequest(proto.Message): DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. 
parameters (google.protobuf.struct_pb2.Value): The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. """ endpoint = proto.Field(proto.STRING, number=1) @@ -72,7 +72,7 @@ class PredictRequest(proto.Message): class PredictResponse(proto.Message): r"""Response message for - ``PredictionService.Predict``. + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. Attributes: predictions (Sequence[google.protobuf.struct_pb2.Value]): @@ -81,7 +81,7 @@ class PredictResponse(proto.Message): Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``prediction_schema_uri``. + [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. deployed_model_id (str): ID of the Endpoint's DeployedModel that served this prediction. @@ -94,7 +94,7 @@ class PredictResponse(proto.Message): class ExplainRequest(proto.Message): r"""Request message for - ``PredictionService.Explain``. + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. Attributes: endpoint (str): @@ -112,17 +112,17 @@ class ExplainRequest(proto.Message): DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``instance_schema_uri``. + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. parameters (google.protobuf.struct_pb2.Value): The parameters that govern the prediction. 
The schema of the parameters may be specified via Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - ``parameters_schema_uri``. + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. explanation_spec_override (google.cloud.aiplatform_v1beta1.types.ExplanationSpecOverride): If specified, overrides the - ``explanation_spec`` + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of the DeployedModel. Can be used for explaining prediction results with different configurations, such as: @@ -134,7 +134,7 @@ class ExplainRequest(proto.Message): deployed_model_id (str): If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding - ``Endpoint.traffic_split``. + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. """ endpoint = proto.Field(proto.STRING, number=1) @@ -152,15 +152,15 @@ class ExplainRequest(proto.Message): class ExplainResponse(proto.Message): r"""Response message for - ``PredictionService.Explain``. + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. Attributes: explanations (Sequence[google.cloud.aiplatform_v1beta1.types.Explanation]): The explanations of the Model's - ``PredictResponse.predictions``. + [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]. It has the same number of elements as - ``instances`` + [instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] to be explained. deployed_model_id (str): ID of the Endpoint's DeployedModel that @@ -168,7 +168,7 @@ class ExplainResponse(proto.Message): predictions (Sequence[google.protobuf.struct_pb2.Value]): The predictions that are the output of the predictions call. Same as - ``PredictResponse.predictions``. 
+ [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]. """ explanations = proto.RepeatedField( diff --git a/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py b/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py index 3ed6593bd6..955b1e5a53 100644 --- a/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py +++ b/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py @@ -40,7 +40,7 @@ class CreateSpecialistPoolRequest(proto.Message): r"""Request message for - ``SpecialistPoolService.CreateSpecialistPool``. + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. Attributes: parent (str): @@ -60,7 +60,7 @@ class CreateSpecialistPoolRequest(proto.Message): class CreateSpecialistPoolOperationMetadata(proto.Message): r"""Runtime operation information for - ``SpecialistPoolService.CreateSpecialistPool``. + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): @@ -74,7 +74,7 @@ class CreateSpecialistPoolOperationMetadata(proto.Message): class GetSpecialistPoolRequest(proto.Message): r"""Request message for - ``SpecialistPoolService.GetSpecialistPool``. + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. Attributes: name (str): @@ -88,7 +88,7 @@ class GetSpecialistPoolRequest(proto.Message): class ListSpecialistPoolsRequest(proto.Message): r"""Request message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. Attributes: parent (str): @@ -98,9 +98,9 @@ class ListSpecialistPoolsRequest(proto.Message): The standard list page size. 
page_token (str): The standard list page token. Typically obtained by - ``ListSpecialistPoolsResponse.next_page_token`` + [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListSpecialistPoolsResponse.next_page_token] of the previous - ``SpecialistPoolService.ListSpecialistPools`` + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools] call. Return first page if empty. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -118,7 +118,7 @@ class ListSpecialistPoolsRequest(proto.Message): class ListSpecialistPoolsResponse(proto.Message): r"""Response message for - ``SpecialistPoolService.ListSpecialistPools``. + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. Attributes: specialist_pools (Sequence[google.cloud.aiplatform_v1beta1.types.SpecialistPool]): @@ -141,7 +141,7 @@ def raw_page(self): class DeleteSpecialistPoolRequest(proto.Message): r"""Request message for - ``SpecialistPoolService.DeleteSpecialistPool``. + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. Attributes: name (str): @@ -162,7 +162,7 @@ class DeleteSpecialistPoolRequest(proto.Message): class UpdateSpecialistPoolRequest(proto.Message): r"""Request message for - ``SpecialistPoolService.UpdateSpecialistPool``. + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. Attributes: specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): @@ -182,7 +182,7 @@ class UpdateSpecialistPoolRequest(proto.Message): class UpdateSpecialistPoolOperationMetadata(proto.Message): r"""Runtime operation metadata for - ``SpecialistPoolService.UpdateSpecialistPool``. 
+ [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. Attributes: specialist_pool (str): diff --git a/google/cloud/aiplatform_v1beta1/types/study.py b/google/cloud/aiplatform_v1beta1/types/study.py index 092d3a3e2d..b89652b37d 100644 --- a/google/cloud/aiplatform_v1beta1/types/study.py +++ b/google/cloud/aiplatform_v1beta1/types/study.py @@ -18,6 +18,7 @@ import proto # type: ignore +from google.protobuf import duration_pb2 as duration # type: ignore from google.protobuf import struct_pb2 as struct # type: ignore from google.protobuf import timestamp_pb2 as timestamp # type: ignore @@ -91,11 +92,30 @@ class Trial(proto.Message): final_measurement (google.cloud.aiplatform_v1beta1.types.Measurement): Output only. The final measurement containing the objective value. + measurements (Sequence[google.cloud.aiplatform_v1beta1.types.Measurement]): + Output only. A list of measurements that are strictly + lexicographically ordered by their induced tuples (steps, + elapsed_duration). These are used for early stopping + computations. start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the Trial was started. end_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the Trial's status changed to ``SUCCEEDED`` or ``INFEASIBLE``. + client_id (str): + Output only. The identifier of the client that originally + requested this Trial. Each client is identified by a unique + client_id. When a client asks for a suggestion, Vizier will + assign it a Trial. The client should evaluate the Trial, + complete it, and report back to Vizier. If suggestion is + asked again by same client_id before the Trial is completed, + the same Trial will be returned. Multiple clients with + different client_ids can ask for suggestions simultaneously, + each of them will get their own Trial. + infeasible_reason (str): + Output only. 
A human readable string describing why the + Trial is infeasible. This is set only if Trial state is + ``INFEASIBLE``. custom_job (str): Output only. The CustomJob name linked to the Trial. It's set for a HyperparameterTuningJob's @@ -141,10 +161,16 @@ class Parameter(proto.Message): final_measurement = proto.Field(proto.MESSAGE, number=5, message="Measurement",) + measurements = proto.RepeatedField(proto.MESSAGE, number=6, message="Measurement",) + start_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,) end_time = proto.Field(proto.MESSAGE, number=8, message=timestamp.Timestamp,) + client_id = proto.Field(proto.STRING, number=9) + + infeasible_reason = proto.Field(proto.STRING, number=10) + custom_job = proto.Field(proto.STRING, number=11) @@ -460,9 +486,9 @@ class DecayCurveAutomatedStoppingSpec(proto.Message): Attributes: use_elapsed_duration (bool): True if - ``Measurement.elapsed_duration`` + [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration] is used as the x-axis of each Trials Decay Curve. Otherwise, - ``Measurement.step_count`` + [Measurement.step_count][google.cloud.aiplatform.v1beta1.Measurement.step_count] will be used as the x-axis. """ @@ -479,7 +505,7 @@ class MedianAutomatedStoppingSpec(proto.Message): Attributes: use_elapsed_duration (bool): True if median automated stopping rule applies on - ``Measurement.elapsed_duration``. + [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration]. It means that elapsed_duration field of latest measurement of current Trial is used to compute median objective value for each completed Trials. @@ -575,6 +601,9 @@ class Measurement(proto.Message): suggested hyperparameter values. Attributes: + elapsed_duration (google.protobuf.duration_pb2.Duration): + Output only. Time that the Trial has been + running at the point of this Measurement. step_count (int): Output only. 
The number of steps the machine learning model has been trained for. Must be @@ -601,6 +630,8 @@ class Metric(proto.Message): value = proto.Field(proto.DOUBLE, number=2) + elapsed_duration = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,) + step_count = proto.Field(proto.INT64, number=2) metrics = proto.RepeatedField(proto.MESSAGE, number=3, message=Metric,) diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard.py b/google/cloud/aiplatform_v1beta1/types/tensorboard.py new file mode 100644 index 0000000000..45db95e7fb --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"Tensorboard",}, +) + + +class Tensorboard(proto.Message): + r"""Tensorboard is a physical database that stores users’ + training metrics. A default Tensorboard is provided in each + region of a GCP project. If needed users can also create extra + Tensorboards in their projects. + + Attributes: + name (str): + Output only. Name of the Tensorboard. 
Format:
+            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
+        display_name (str):
+            Required. User provided name of this
+            Tensorboard.
+        description (str):
+            Description of this Tensorboard.
+        encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
+            Customer-managed encryption key spec for a
+            Tensorboard. If set, this Tensorboard and all
+            sub-resources of this Tensorboard will be
+            secured by this key.
+        blob_storage_path_prefix (str):
+            Output only. Consumer project Cloud Storage
+            path prefix used to store blob data, which can
+            either be a bucket or directory. Does not end
+            with a '/'.
+        run_count (int):
+            Output only. The number of Runs stored in
+            this Tensorboard.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Tensorboard
+            was created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Tensorboard
+            was last updated.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Tensorboard.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize your Tensorboards.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed. No more than 64 user labels can be
+            associated with one Tensorboard (System labels
+            are excluded).
+
+            See https://goo.gl/xmQnxf for more information
+            and examples of labels. System reserved label
+            keys are prefixed with
+            "aiplatform.googleapis.com/" and are immutable.
+        etag (str):
+            Used to perform a consistent read-modify-
+            write updates. If not set, a blind "overwrite"
+            update happens.
+ """ + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + encryption_spec = proto.Field( + proto.MESSAGE, number=11, message=gca_encryption_spec.EncryptionSpec, + ) + + blob_storage_path_prefix = proto.Field(proto.STRING, number=10) + + run_count = proto.Field(proto.INT32, number=5) + + create_time = proto.Field(proto.MESSAGE, number=6, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=8) + + etag = proto.Field(proto.STRING, number=9) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py new file mode 100644 index 0000000000..cd217297fc --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "TimeSeriesData", + "TimeSeriesDataPoint", + "Scalar", + "TensorboardTensor", + "TensorboardBlobSequence", + "TensorboardBlob", + }, +) + + +class TimeSeriesData(proto.Message): + r"""All the data stored in a TensorboardTimeSeries. + + Attributes: + tensorboard_time_series_id (str): + Required. The ID of the + TensorboardTimeSeries, which will become the + final component of the TensorboardTimeSeries' + resource name + value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType): + Required. Immutable. The value type of this + time series. All the values in this time series + data must match this value type. + values (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]): + Required. Data points in this time series. + """ + + tensorboard_time_series_id = proto.Field(proto.STRING, number=1) + + value_type = proto.Field( + proto.ENUM, + number=2, + enum=tensorboard_time_series.TensorboardTimeSeries.ValueType, + ) + + values = proto.RepeatedField( + proto.MESSAGE, number=3, message="TimeSeriesDataPoint", + ) + + +class TimeSeriesDataPoint(proto.Message): + r"""A TensorboardTimeSeries data point. + + Attributes: + scalar (google.cloud.aiplatform_v1beta1.types.Scalar): + A scalar value. + tensor (google.cloud.aiplatform_v1beta1.types.TensorboardTensor): + A tensor value. + blobs (google.cloud.aiplatform_v1beta1.types.TensorboardBlobSequence): + A blob sequence value. + wall_time (google.protobuf.timestamp_pb2.Timestamp): + Wall clock timestamp when this data point is + generated by the end user. + step (int): + Step index of this data point within the run. 
+ """ + + scalar = proto.Field(proto.MESSAGE, number=3, oneof="value", message="Scalar",) + + tensor = proto.Field( + proto.MESSAGE, number=4, oneof="value", message="TensorboardTensor", + ) + + blobs = proto.Field( + proto.MESSAGE, number=5, oneof="value", message="TensorboardBlobSequence", + ) + + wall_time = proto.Field(proto.MESSAGE, number=1, message=timestamp.Timestamp,) + + step = proto.Field(proto.INT64, number=2) + + +class Scalar(proto.Message): + r"""One point viewable on a scalar metric plot. + + Attributes: + value (float): + Value of the point at this step / timestamp. + """ + + value = proto.Field(proto.DOUBLE, number=1) + + +class TensorboardTensor(proto.Message): + r"""One point viewable on a tensor metric plot. + + Attributes: + value (bytes): + Required. Serialized form of + https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto + version_number (int): + Optional. Version number of TensorProto used to serialize + [value][google.cloud.aiplatform.v1beta1.TensorboardTensor.value]. + """ + + value = proto.Field(proto.BYTES, number=1) + + version_number = proto.Field(proto.INT32, number=2) + + +class TensorboardBlobSequence(proto.Message): + r"""One point viewable on a blob metric plot, but mostly just a wrapper + message to work around repeated fields can't be used directly within + ``oneof`` fields. + + Attributes: + values (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]): + List of blobs contained within the sequence. + """ + + values = proto.RepeatedField(proto.MESSAGE, number=1, message="TensorboardBlob",) + + +class TensorboardBlob(proto.Message): + r"""One blob (e.g, image, graph) viewable on a blob metric plot. + + Attributes: + id (str): + Output only. A URI safe key uniquely + identifying a blob. Can be used to locate the + blob stored in the Cloud Storage bucket of the + consumer project. + data (bytes): + Optional. 
The bytes of the blob is not + present unless it's returned by the + ReadTensorboardBlobData endpoint. + """ + + id = proto.Field(proto.STRING, number=1) + + data = proto.Field(proto.BYTES, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py new file mode 100644 index 0000000000..6c073aa5e8 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"TensorboardExperiment",}, +) + + +class TensorboardExperiment(proto.Message): + r"""A TensorboardExperiment is a group of TensorboardRuns, that + are typically the results of a training job run, in a + Tensorboard. + + Attributes: + name (str): + Output only. Name of the TensorboardExperiment. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + display_name (str): + User provided name of this + TensorboardExperiment. + description (str): + Description of this TensorboardExperiment. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. 
Timestamp when this + TensorboardExperiment was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardExperiment was last updated. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardExperiment.LabelsEntry]): + The labels with user-defined metadata to organize your + Datasets. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one Dataset (System labels are excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each Dataset: + + - "aiplatform.googleapis.com/dataset_metadata_schema": + + - output only, its value is the + [metadata_schema's][metadata_schema_uri] title. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + source (str): + Immutable. Source of the + TensorboardExperiment. Example: a custom + training job. 
+ """ + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=6) + + etag = proto.Field(proto.STRING, number=7) + + source = proto.Field(proto.STRING, number=8) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py new file mode 100644 index 0000000000..f9cff272c4 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"TensorboardRun",}, +) + + +class TensorboardRun(proto.Message): + r"""TensorboardRun maps to a specific execution of a training job + with a given set of hyperparameter values, model definition, + dataset, etc + + Attributes: + name (str): + Output only. Name of the TensorboardRun. 
Format:
+            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+        display_name (str):
+            Required. User provided name of this
+            TensorboardRun. This value must be unique among
+            all TensorboardRuns belonging to the same parent
+            TensorboardExperiment.
+        description (str):
+            Description of this TensorboardRun.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            TensorboardRun was created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            TensorboardRun was last updated.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun.LabelsEntry]):
+
+        etag (str):
+            Used to perform a consistent read-modify-
+            write updates. If not set, a blind "overwrite"
+            update happens.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+    display_name = proto.Field(proto.STRING, number=2)
+
+    description = proto.Field(proto.STRING, number=3)
+
+    create_time = proto.Field(proto.MESSAGE, number=6, message=timestamp.Timestamp,)
+
+    update_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,)
+
+    labels = proto.MapField(proto.STRING, proto.STRING, number=8)
+
+    etag = proto.Field(proto.STRING, number=9)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py
new file mode 100644
index 0000000000..32b7aa3dbe
--- /dev/null
+++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py
@@ -0,0 +1,892 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import operation +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_experiment as gca_tensorboard_experiment, +) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_time_series as gca_tensorboard_time_series, +) +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "CreateTensorboardRequest", + "GetTensorboardRequest", + "ListTensorboardsRequest", + "ListTensorboardsResponse", + "UpdateTensorboardRequest", + "DeleteTensorboardRequest", + "CreateTensorboardExperimentRequest", + "GetTensorboardExperimentRequest", + "ListTensorboardExperimentsRequest", + "ListTensorboardExperimentsResponse", + "UpdateTensorboardExperimentRequest", + "DeleteTensorboardExperimentRequest", + "CreateTensorboardRunRequest", + "GetTensorboardRunRequest", + "ReadTensorboardBlobDataRequest", + "ReadTensorboardBlobDataResponse", + "ListTensorboardRunsRequest", + "ListTensorboardRunsResponse", + "UpdateTensorboardRunRequest", + "DeleteTensorboardRunRequest", + "CreateTensorboardTimeSeriesRequest", + "GetTensorboardTimeSeriesRequest", + "ListTensorboardTimeSeriesRequest", + "ListTensorboardTimeSeriesResponse", + 
"UpdateTensorboardTimeSeriesRequest", + "DeleteTensorboardTimeSeriesRequest", + "ReadTensorboardTimeSeriesDataRequest", + "ReadTensorboardTimeSeriesDataResponse", + "WriteTensorboardRunDataRequest", + "WriteTensorboardRunDataResponse", + "ExportTensorboardTimeSeriesDataRequest", + "ExportTensorboardTimeSeriesDataResponse", + "CreateTensorboardOperationMetadata", + "UpdateTensorboardOperationMetadata", + }, +) + + +class CreateTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Tensorboard in. Format: + ``projects/{project}/locations/{location}`` + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + tensorboard = proto.Field( + proto.MESSAGE, number=2, message=gca_tensorboard.Tensorboard, + ) + + +class GetTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. + + Attributes: + name (str): + Required. The name of the Tensorboard resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListTensorboardsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + + Attributes: + parent (str): + Required. The resource name of the Location + to list Tensorboards. Format: + 'projects/{project}/locations/{location}' + filter (str): + Lists the Tensorboards that match the filter + expression. + page_size (int): + The maximum number of Tensorboards to return. + The service may return fewer than this value. 
If
+            unspecified, at most 100 Tensorboards will be
+            returned. The maximum value is 100; values above
+            100 will be coerced to 100.
+        page_token (str):
+            A page token, received from a previous
+            [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]
+            call. Provide this to retrieve the subsequent page.
+
+            When paginating, all other parameters provided to
+            [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]
+            must match the call that provided the page token.
+        order_by (str):
+            Field to use to sort the list.
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    filter = proto.Field(proto.STRING, number=2)
+
+    page_size = proto.Field(proto.INT32, number=3)
+
+    page_token = proto.Field(proto.STRING, number=4)
+
+    order_by = proto.Field(proto.STRING, number=5)
+
+    read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask.FieldMask,)
+
+
+class ListTensorboardsResponse(proto.Message):
+    r"""Response message for
+    [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards].
+
+    Attributes:
+        tensorboards (Sequence[google.cloud.aiplatform_v1beta1.types.Tensorboard]):
+            The Tensorboards matching the request.
+        next_page_token (str):
+            A token, which can be sent as
+            [ListTensorboardsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardsRequest.page_token]
+            to retrieve the next page. If this field is omitted, there
+            are no subsequent pages.
+ """ + + @property + def raw_page(self): + return self + + tensorboards = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_tensorboard.Tensorboard, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be overwritten + if it is in the mask. If the user does not provide a mask + then all fields will be overwritten if new values are + specified. + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + update_mask = proto.Field(proto.MESSAGE, number=1, message=field_mask.FieldMask,) + + tensorboard = proto.Field( + proto.MESSAGE, number=2, message=gca_tensorboard.Tensorboard, + ) + + +class DeleteTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. + + Attributes: + name (str): + Required. The name of the Tensorboard to be deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class CreateTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. + + Attributes: + parent (str): + Required. 
The resource name of the Tensorboard to create the + TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + The TensorboardExperiment to create. + tensorboard_experiment_id (str): + Required. The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid characters + are /[a-z][0-9]-/. + """ + + parent = proto.Field(proto.STRING, number=1) + + tensorboard_experiment = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + + tensorboard_experiment_id = proto.Field(proto.STRING, number=3) + + +class GetTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. + + Attributes: + name (str): + Required. The name of the TensorboardExperiment resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListTensorboardExperimentsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Attributes: + parent (str): + Required. The resource name of the + Tensorboard to list TensorboardExperiments. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + filter (str): + Lists the TensorboardExperiments that match + the filter expression. + page_size (int): + The maximum number of TensorboardExperiments + to return. The service may return fewer than + this value. If unspecified, at most 50 + TensorboardExperiments will be returned. 
The
+            maximum value is 1000; values above 1000 will be
+            coerced to 1000.
+        page_token (str):
+            A page token, received from a previous
+            [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]
+            call. Provide this to retrieve the subsequent page.
+
+            When paginating, all other parameters provided to
+            [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]
+            must match the call that provided the page token.
+        order_by (str):
+            Field to use to sort the list.
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    filter = proto.Field(proto.STRING, number=2)
+
+    page_size = proto.Field(proto.INT32, number=3)
+
+    page_token = proto.Field(proto.STRING, number=4)
+
+    order_by = proto.Field(proto.STRING, number=5)
+
+    read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask.FieldMask,)
+
+
+class ListTensorboardExperimentsResponse(proto.Message):
+    r"""Response message for
+    [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments].
+
+    Attributes:
+        tensorboard_experiments (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardExperiment]):
+            The TensorboardExperiments matching the
+            request.
+        next_page_token (str):
+            A token, which can be sent as
+            [ListTensorboardExperimentsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardExperimentsRequest.page_token]
+            to retrieve the next page. If this field is omitted, there
+            are no subsequent pages.
+ """ + + @property + def raw_page(self): + return self + + tensorboard_experiments = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are relative + to the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if new + values are specified. + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + Required. The TensorboardExperiment's ``name`` field is used + to identify the TensorboardExperiment to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + update_mask = proto.Field(proto.MESSAGE, number=1, message=field_mask.FieldMask,) + + tensorboard_experiment = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + + +class DeleteTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. + + Attributes: + name (str): + Required. The name of the TensorboardExperiment to be + deleted. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class CreateTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. + + Attributes: + parent (str): + Required. The resource name of the Tensorboard to create the + TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. The TensorboardRun to create. + tensorboard_run_id (str): + Required. The ID to use for the Tensorboard run, which will + become the final component of the Tensorboard run's resource + name. + + This value should be 1-128 characters, and valid characters + are /[a-z][0-9]-/. + """ + + parent = proto.Field(proto.STRING, number=1) + + tensorboard_run = proto.Field( + proto.MESSAGE, number=2, message=gca_tensorboard_run.TensorboardRun, + ) + + tensorboard_run_id = proto.Field(proto.STRING, number=3) + + +class GetTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. + + Attributes: + name (str): + Required. The name of the TensorboardRun resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ReadTensorboardBlobDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + + Attributes: + time_series (str): + Required. The resource name of the TensorboardTimeSeries to + list Blobs. 
Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' + blob_ids (Sequence[str]): + IDs of the blobs to read. + """ + + time_series = proto.Field(proto.STRING, number=1) + + blob_ids = proto.RepeatedField(proto.STRING, number=2) + + +class ReadTensorboardBlobDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + + Attributes: + blobs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]): + Blob messages containing blob bytes. + """ + + blobs = proto.RepeatedField( + proto.MESSAGE, number=1, message=tensorboard_data.TensorboardBlob, + ) + + +class ListTensorboardRunsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + + Attributes: + parent (str): + Required. The resource name of the + Tensorboard to list TensorboardRuns. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' + filter (str): + Lists the TensorboardRuns that match the + filter expression. + page_size (int): + The maximum number of TensorboardRuns to + return. The service may return fewer than this + value. If unspecified, at most 50 + TensorboardRuns will be returned. The maximum + value is 1000; values above 1000 will be coerced + to 1000. + page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] + must match the call that provided the page token. 
+ order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + order_by = proto.Field(proto.STRING, number=5) + + read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask.FieldMask,) + + +class ListTensorboardRunsResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + + Attributes: + tensorboard_runs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun]): + The TensorboardRuns matching the request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardRunsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardRunsRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + tensorboard_runs = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_tensorboard_run.TensorboardRun, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the update. + The fields specified in the update_mask are relative to the + resource, not the full request. A field will be overwritten + if it is in the mask. If the user does not provide a mask + then all fields will be overwritten if new values are + specified.
+ tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + update_mask = proto.Field(proto.MESSAGE, number=1, message=field_mask.FieldMask,) + + tensorboard_run = proto.Field( + proto.MESSAGE, number=2, message=gca_tensorboard_run.TensorboardRun, + ) + + +class DeleteTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. + + Attributes: + name (str): + Required. The name of the TensorboardRun to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class CreateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardRun to create + the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + tensorboard_time_series_id (str): + Optional. The user specified unique ID to use for the + TensorboardTimeSeries, which will become the final component + of the TensorboardTimeSeries's resource name. Ref: + go/ucaip-user-specified-id + + This value should match "[a-z0-9][a-z0-9-]{0, 127}". + tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries to + create. 
+ """ + + parent = proto.Field(proto.STRING, number=1) + + tensorboard_time_series_id = proto.Field(proto.STRING, number=3) + + tensorboard_time_series = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + +class GetTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. + + Attributes: + name (str): + Required. The name of the TensorboardTimeSeries resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + + Attributes: + parent (str): + Required. The resource name of the + TensorboardRun to list TensorboardTimeSeries. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' + filter (str): + Lists the TensorboardTimeSeries that match + the filter expression. + page_size (int): + The maximum number of TensorboardTimeSeries + to return. The service may return fewer than + this value. If unspecified, at most 50 + TensorboardTimeSeries will be returned. The + maximum value is 1000; values above 1000 will be + coerced to 1000. + page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] + call. Provide this to retrieve the subsequent page. 
+ + When paginating, all other parameters provided to + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] + must match the call that provided the page token. + order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + order_by = proto.Field(proto.STRING, number=5) + + read_mask = proto.Field(proto.MESSAGE, number=6, message=field_mask.FieldMask,) + + +class ListTensorboardTimeSeriesResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + + Attributes: + tensorboard_time_series (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries]): + The TensorboardTimeSeries matching the + request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardTimeSeriesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardTimeSeriesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + tensorboard_time_series = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required.
Field mask is used to specify the fields to be + overwritten in the TensorboardTimeSeries resource by the + update. The fields specified in the update_mask are relative + to the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if new + values are specified. + tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries' ``name`` field is used + to identify the TensorboardTimeSeries to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + update_mask = proto.Field(proto.MESSAGE, number=1, message=field_mask.FieldMask,) + + tensorboard_time_series = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + +class DeleteTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. + + Attributes: + name (str): + Required. The name of the TensorboardTimeSeries to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ReadTensorboardTimeSeriesDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + Attributes: + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries to + read data from. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + max_data_points (int): + The maximum number of TensorboardTimeSeries' + data to return. + This value should be a positive integer. + This value can be set to -1 to return all data. + filter (str): + Reads the TensorboardTimeSeries' data that + match the filter expression. + """ + + tensorboard_time_series = proto.Field(proto.STRING, number=1) + + max_data_points = proto.Field(proto.INT32, number=2) + + filter = proto.Field(proto.STRING, number=3) + + +class ReadTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + Attributes: + time_series_data (google.cloud.aiplatform_v1beta1.types.TimeSeriesData): + The returned time series data. + """ + + time_series_data = proto.Field( + proto.MESSAGE, number=1, message=tensorboard_data.TimeSeriesData, + ) + + +class WriteTensorboardRunDataRequest(proto.Message): + r"""Request message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + + Attributes: + tensorboard_run (str): + Required. The resource name of the TensorboardRun to write + data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): + Required. The TensorboardTimeSeries data to + write. Values within a time series are indexed + by their step value. Repeated writes to the same + step will overwrite the existing value for that + step. + The upper limit of data points per write request + is 5000.
+ """ + + tensorboard_run = proto.Field(proto.STRING, number=1) + + time_series_data = proto.RepeatedField( + proto.MESSAGE, number=2, message=tensorboard_data.TimeSeriesData, + ) + + +class WriteTensorboardRunDataResponse(proto.Message): + r"""Response message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + """ + + +class ExportTensorboardTimeSeriesDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Attributes: + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries to + export data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + filter (str): + Exports the TensorboardTimeSeries' data that + match the filter expression. + page_size (int): + The maximum number of data points to return per page. The + default page_size will be 1000. Values must be between 1 and + 10000. Values above 10000 will be coerced to 10000. + page_token (str): + A page token, received from a previous + [TensorboardService.ExportTensorboardTimeSeries][] call. + Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ExportTensorboardTimeSeries][] must + match the call that provided the page token. + order_by (str): + Field to use to sort the + TensorboardTimeSeries' data. By default, + TensorboardTimeSeries' data will be returned in + a pseudo random order. 
+ """ + + tensorboard_time_series = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + order_by = proto.Field(proto.STRING, number=5) + + +class ExportTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Attributes: + time_series_data_points (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]): + The returned time series data points. + next_page_token (str): + A token, which can be sent as + [ExportTensorboardTimeSeriesRequest.page_token][] to + retrieve the next page. If this field is omitted, there are + no subsequent pages. + """ + + @property + def raw_page(self): + return self + + time_series_data_points = proto.RepeatedField( + proto.MESSAGE, number=1, message=tensorboard_data.TimeSeriesDataPoint, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class CreateTensorboardOperationMetadata(proto.Message): + r"""Details of operations that perform create Tensorboard. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Tensorboard. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +class UpdateTensorboardOperationMetadata(proto.Message): + r"""Details of operations that perform update Tensorboard. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Tensorboard. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py new file mode 100644 index 0000000000..47a66d38f6 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"TensorboardTimeSeries",}, +) + + +class TensorboardTimeSeries(proto.Message): + r"""TensorboardTimeSeries maps to times series produced in + training runs + + Attributes: + name (str): + Output only. Name of the + TensorboardTimeSeries. + display_name (str): + Required. User provided name of this + TensorboardTimeSeries. This value should be + unique among all TensorboardTimeSeries resources + belonging to the same TensorboardRun resource + (parent resource). + description (str): + Description of this TensorboardTimeSeries. + value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType): + Required. Immutable. Type of + TensorboardTimeSeries value. 
+ create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardTimeSeries was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardTimeSeries was last updated. + etag (str): + Used to perform a consistent read-modify- + write updates. If not set, a blind "overwrite" + update happens. + plugin_name (str): + Immutable. Name of the plugin this time + series pertain to. Such as Scalar, Tensor, Blob + plugin_data (bytes): + Data of the current plugin, with the size + limited to 65KB. + metadata (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.Metadata): + Output only. Scalar, Tensor, or Blob metadata + for this TensorboardTimeSeries. + """ + + class ValueType(proto.Enum): + r"""An enum representing the value type of a + TensorboardTimeSeries. + """ + VALUE_TYPE_UNSPECIFIED = 0 + SCALAR = 1 + TENSOR = 2 + BLOB_SEQUENCE = 3 + + class Metadata(proto.Message): + r"""Describes metadata for a TensorboardTimeSeries. + + Attributes: + max_step (int): + Output only. Max step index of all data + points within a TensorboardTimeSeries. + max_wall_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Max wall clock timestamp of all + data points within a TensorboardTimeSeries. + max_blob_sequence_length (int): + Output only. The largest blob sequence length (number of + blobs) of all data points in this time series, if its + ValueType is BLOB_SEQUENCE.
+ """ + + max_step = proto.Field(proto.INT64, number=1) + + max_wall_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp.Timestamp, + ) + + max_blob_sequence_length = proto.Field(proto.INT64, number=3) + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + value_type = proto.Field(proto.ENUM, number=4, enum=ValueType,) + + create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp.Timestamp,) + + etag = proto.Field(proto.STRING, number=7) + + plugin_name = proto.Field(proto.STRING, number=8) + + plugin_data = proto.Field(proto.BYTES, number=9) + + metadata = proto.Field(proto.MESSAGE, number=10, message=Metadata,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py index 3c03b0f47d..52c716bfed 100644 --- a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py @@ -45,7 +45,7 @@ class TrainingPipeline(proto.Message): Model. It always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - ``upload`` + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] the Model to AI Platform, and evaluate the Model. Attributes: @@ -58,11 +58,11 @@ class TrainingPipeline(proto.Message): input_data_config (google.cloud.aiplatform_v1beta1.types.InputDataConfig): Specifies AI Platform owned input data that may be used for training the Model. 
The TrainingPipeline's - ``training_task_definition`` + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] should make clear whether this config is used and if there are any special requirements on how it should be filled. If nothing about this config is mentioned in the - ``training_task_definition``, + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], then it should be assumed that the TrainingPipeline does not depend on this configuration. training_task_definition (str): @@ -81,27 +81,27 @@ class TrainingPipeline(proto.Message): training_task_inputs (google.protobuf.struct_pb2.Value): Required. The training task's parameter(s), as specified in the - ``training_task_definition``'s + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s ``inputs``. training_task_metadata (google.protobuf.struct_pb2.Value): Output only. The metadata information as specified in the - ``training_task_definition``'s + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s ``metadata``. This metadata is an auxiliary runtime and final information about the training task. While the pipeline is running this information is populated only at a best effort basis. Only present if the pipeline's - ``training_task_definition`` + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] contains ``metadata`` object. model_to_upload (google.cloud.aiplatform_v1beta1.types.Model): Describes the Model that may be uploaded (via - ``ModelService.UploadModel``) + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]) by this TrainingPipeline. 
The TrainingPipeline's - ``training_task_definition`` + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] should make clear whether this Model description should be populated, and if there are any special requirements regarding how it should be filled. If nothing is mentioned in the - ``training_task_definition``, + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], then it should be assumed that this field should not be filled and the training task either uploads the Model without a need of this information, or that training task @@ -109,7 +109,7 @@ class TrainingPipeline(proto.Message): When the Pipeline's state becomes ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been uploaded into AI Platform, then the model_to_upload's - resource ``name`` + resource [name][google.cloud.aiplatform.v1beta1.Model.name] is populated. The Model is always uploaded into the Project and Location in which this pipeline is. state (google.cloud.aiplatform_v1beta1.types.PipelineState): @@ -147,7 +147,7 @@ class TrainingPipeline(proto.Message): Note: Model trained by this TrainingPipeline is also secured by this key if - ``model_to_upload`` + [model_to_upload][google.cloud.aiplatform.v1beta1.TrainingPipeline.encryption_spec] is not set separately. """ @@ -270,7 +270,7 @@ class InputDataConfig(proto.Message): the DataItem they are on (for the auto-assigned that role is decided by AI Platform). A filter with same syntax as the one used in - ``ListAnnotations`` + [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations] may be used, but note here it filters across all Annotations of the Dataset, and not just within a single DataItem. 
annotation_schema_uri (str): @@ -284,9 +284,9 @@ class InputDataConfig(proto.Message): schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the chosen schema must be consistent with - ``metadata`` + [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] of the Dataset specified by - ``dataset_id``. + [dataset_id][google.cloud.aiplatform.v1beta1.InputDataConfig.dataset_id]. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in @@ -294,11 +294,11 @@ class InputDataConfig(proto.Message): the role of the DataItem they are on. When used in conjunction with - ``annotations_filter``, + [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter], the Annotations used for training are filtered by both - ``annotations_filter`` + [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter] and - ``annotation_schema_uri``. + [annotation_schema_uri][google.cloud.aiplatform.v1beta1.InputDataConfig.annotation_schema_uri]. """ fraction_split = proto.Field( @@ -375,7 +375,7 @@ class FilterSplit(proto.Message): Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to train the Model. A filter with same syntax as the one used in - ``DatasetService.ListDataItems`` + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, @@ -384,7 +384,7 @@ class FilterSplit(proto.Message): Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. 
A filter with same syntax as the one used in - ``DatasetService.ListDataItems`` + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, @@ -393,7 +393,7 @@ class FilterSplit(proto.Message): Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with same syntax as the one used in - ``DatasetService.ListDataItems`` + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, diff --git a/google/cloud/aiplatform_v1beta1/types/types.py b/google/cloud/aiplatform_v1beta1/types/types.py new file mode 100644 index 0000000000..53581d3bdb --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/types.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={"BoolArray", "DoubleArray", "Int64Array", "StringArray",}, +) + + +class BoolArray(proto.Message): + r"""A list of boolean values. 
+ + Attributes: + values (Sequence[bool]): + A list of bool values. + """ + + values = proto.RepeatedField(proto.BOOL, number=1) + + +class DoubleArray(proto.Message): + r"""A list of double values. + + Attributes: + values (Sequence[float]): + A list of double values. + """ + + values = proto.RepeatedField(proto.DOUBLE, number=1) + + +class Int64Array(proto.Message): + r"""A list of int64 values. + + Attributes: + values (Sequence[int]): + A list of int64 values. + """ + + values = proto.RepeatedField(proto.INT64, number=1) + + +class StringArray(proto.Message): + r"""A list of string values. + + Attributes: + values (Sequence[str]): + A list of string values. + """ + + values = proto.RepeatedField(proto.STRING, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/user_action_reference.py b/google/cloud/aiplatform_v1beta1/types/user_action_reference.py index 25180ae567..7c51035fbf 100644 --- a/google/cloud/aiplatform_v1beta1/types/user_action_reference.py +++ b/google/cloud/aiplatform_v1beta1/types/user_action_reference.py @@ -39,8 +39,9 @@ class UserActionReference(proto.Message): LabelingJob. Format: 'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}' method (str): - The method name of the API call. For example, - "/google.cloud.aiplatform.v1alpha1.DatasetService.CreateDataset". + The method name of the API RPC call. For + example, + "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset". 
""" operation = proto.Field(proto.STRING, number=1, oneof="reference") diff --git a/google/cloud/aiplatform_v1beta1/types/value.py b/google/cloud/aiplatform_v1beta1/types/value.py new file mode 100644 index 0000000000..fe79c9e2e8 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/value.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"Value",}, +) + + +class Value(proto.Message): + r"""Value is the value of the field. + + Attributes: + int_value (int): + An integer value. + double_value (float): + A double value. + string_value (str): + A string value. + """ + + int_value = proto.Field(proto.INT64, number=1, oneof="value") + + double_value = proto.Field(proto.DOUBLE, number=2, oneof="value") + + string_value = proto.Field(proto.STRING, number=3, oneof="value") + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/vizier_service.py b/google/cloud/aiplatform_v1beta1/types/vizier_service.py index 2b837c476e..1808933a7f 100644 --- a/google/cloud/aiplatform_v1beta1/types/vizier_service.py +++ b/google/cloud/aiplatform_v1beta1/types/vizier_service.py @@ -54,7 +54,7 @@ class GetStudyRequest(proto.Message): r"""Request message for - ``VizierService.GetStudy``. 
+ [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. Attributes: name (str): @@ -67,7 +67,7 @@ class GetStudyRequest(proto.Message): class CreateStudyRequest(proto.Message): r"""Request message for - ``VizierService.CreateStudy``. + [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. Attributes: parent (str): @@ -86,7 +86,7 @@ class CreateStudyRequest(proto.Message): class ListStudiesRequest(proto.Message): r"""Request message for - ``VizierService.ListStudies``. + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. Attributes: parent (str): @@ -112,7 +112,7 @@ class ListStudiesRequest(proto.Message): class ListStudiesResponse(proto.Message): r"""Response message for - ``VizierService.ListStudies``. + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. Attributes: studies (Sequence[google.cloud.aiplatform_v1beta1.types.Study]): @@ -134,7 +134,7 @@ def raw_page(self): class DeleteStudyRequest(proto.Message): r"""Request message for - ``VizierService.DeleteStudy``. + [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. Attributes: name (str): @@ -148,7 +148,7 @@ class DeleteStudyRequest(proto.Message): class LookupStudyRequest(proto.Message): r"""Request message for - ``VizierService.LookupStudy``. + [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. Attributes: parent (str): @@ -166,7 +166,7 @@ class LookupStudyRequest(proto.Message): class SuggestTrialsRequest(proto.Message): r"""Request message for - ``VizierService.SuggestTrials``. + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. Attributes: parent (str): @@ -195,7 +195,7 @@ class SuggestTrialsRequest(proto.Message): class SuggestTrialsResponse(proto.Message): r"""Response message for - ``VizierService.SuggestTrials``. 
+ [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. Attributes: trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): @@ -243,7 +243,7 @@ class SuggestTrialsMetadata(proto.Message): class CreateTrialRequest(proto.Message): r"""Request message for - ``VizierService.CreateTrial``. + [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. Attributes: parent (str): @@ -261,7 +261,7 @@ class CreateTrialRequest(proto.Message): class GetTrialRequest(proto.Message): r"""Request message for - ``VizierService.GetTrial``. + [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. Attributes: name (str): @@ -274,7 +274,7 @@ class GetTrialRequest(proto.Message): class ListTrialsRequest(proto.Message): r"""Request message for - ``VizierService.ListTrials``. + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. Attributes: parent (str): @@ -300,7 +300,7 @@ class ListTrialsRequest(proto.Message): class ListTrialsResponse(proto.Message): r"""Response message for - ``VizierService.ListTrials``. + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. Attributes: trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): @@ -322,7 +322,7 @@ def raw_page(self): class AddTrialMeasurementRequest(proto.Message): r"""Request message for - ``VizierService.AddTrialMeasurement``. + [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. Attributes: trial_name (str): @@ -340,7 +340,7 @@ class AddTrialMeasurementRequest(proto.Message): class CompleteTrialRequest(proto.Message): r"""Request message for - ``VizierService.CompleteTrial``. + [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. 
Attributes: name (str): @@ -373,7 +373,7 @@ class CompleteTrialRequest(proto.Message): class DeleteTrialRequest(proto.Message): r"""Request message for - ``VizierService.DeleteTrial``. + [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. Attributes: name (str): @@ -386,7 +386,7 @@ class DeleteTrialRequest(proto.Message): class CheckTrialEarlyStoppingStateRequest(proto.Message): r"""Request message for - ``VizierService.CheckTrialEarlyStoppingState``. + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. Attributes: trial_name (str): @@ -399,7 +399,7 @@ class CheckTrialEarlyStoppingStateRequest(proto.Message): class CheckTrialEarlyStoppingStateResponse(proto.Message): r"""Response message for - ``VizierService.CheckTrialEarlyStoppingState``. + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. Attributes: should_stop (bool): @@ -435,7 +435,7 @@ class CheckTrialEarlyStoppingStateMetatdata(proto.Message): class StopTrialRequest(proto.Message): r"""Request message for - ``VizierService.StopTrial``. + [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. Attributes: name (str): @@ -448,7 +448,7 @@ class StopTrialRequest(proto.Message): class ListOptimalTrialsRequest(proto.Message): r"""Request message for - ``VizierService.ListOptimalTrials``. + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. Attributes: parent (str): @@ -461,7 +461,7 @@ class ListOptimalTrialsRequest(proto.Message): class ListOptimalTrialsResponse(proto.Message): r"""Response message for - ``VizierService.ListOptimalTrials``. + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. 
Attributes: optimal_trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): diff --git a/noxfile.py b/noxfile.py index 2cb95f3d6d..38bf2db67d 100644 --- a/noxfile.py +++ b/noxfile.py @@ -18,6 +18,7 @@ from __future__ import absolute_import import os +import pathlib import shutil import nox @@ -30,6 +31,8 @@ SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + # 'docfx' is excluded since it only needs to run in 'docs-presubmit' nox.options.sessions = [ "unit", @@ -59,16 +62,9 @@ def lint(session): session.run("flake8", "google", "tests") -@nox.session(python="3.6") +@nox.session(python=DEFAULT_PYTHON_VERSION) def blacken(session): - """Run black. - - Format code to uniform standard. - - This currently uses Python 3.6 due to the automated Kokoro run of synthtool. - That run uses an image that doesn't have 3.6 installed. Before updating this - check the state of the `gcp_ubuntu_config` we use for that Kokoro run. - """ + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) session.run( "black", *BLACK_PATHS, @@ -84,13 +80,15 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. - session.install("asyncmock", "pytest-asyncio") - session.install( - "mock", "pytest", "pytest-cov", + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) + session.install("asyncmock", "pytest-asyncio", "-c", constraints_path) - session.install("-e", ".") + session.install("mock", "pytest", "pytest-cov", "-c", constraints_path) + + session.install("-e", ".", "-c", constraints_path) # Run py.test against the unit tests. 
session.run( @@ -117,15 +115,15 @@ def unit(session): @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": session.skip("RUN_SYSTEM_TESTS is set to false, skipping") - # Sanity check: Only run tests if the environment variable is set. - if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): - session.skip("Credentials must be set via environment variable") # Install pyopenssl for mTLS testing. if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true": session.install("pyopenssl") @@ -141,10 +139,8 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install( - "mock", "pytest", "google-cloud-testutils", - ) - session.install("-e", ".") + session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path) + session.install("-e", ".", "-c", constraints_path) # Run py.test against the system tests. if system_test_exists: @@ -173,7 +169,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. 
""" session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=99") + session.run("coverage", "report", "--show-missing", "--fail-under=95") session.run("coverage", "erase") diff --git a/renovate.json b/renovate.json index f08bc22c9a..c04895563e 100644 --- a/renovate.json +++ b/renovate.json @@ -2,5 +2,8 @@ "extends": [ "config:base", ":preserveSemverRanges" ], - "ignorePaths": [".pre-commit-config.yaml"] + "ignorePaths": [".pre-commit-config.yaml"], + "pip_requirements": { + "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", "samples/[\\S/]*constraints-test.txt"] + } } diff --git a/synth.py b/synth.py index 70bd6f3701..d0ff3de448 100644 --- a/synth.py +++ b/synth.py @@ -86,12 +86,6 @@ # Patch the library # ---------------------------------------------------------------------------- -s.replace( - "**/client.py", - "client_options: ClientOptions = ", - "client_options: ClientOptions.ClientOptions = ", -) - # Generator adds a bad import statement to enhanced type; # need to fix in post-processing steps. 
@@ -107,56 +101,6 @@ -# post processing to fix the generated reference doc -from synthtool import transforms as st -import re - -# https://github.com/googleapis/gapic-generator-python/issues/479 -paths = st._filter_files(st._expand_paths("google/cloud/**/*.py", ".")) - -pattern = r"(:\w+:``[^`]+``)" -expr = re.compile(pattern, flags=re.MULTILINE) -replaces = [] -for path in paths: - with path.open("r+") as fh: - content = fh.read() - matches = re.findall(expr, content) - if matches: - for match in matches: - before = match - after = match.replace("``", "`") - replaces.append((path, before, after)) - -for path, before, after in replaces: - s.replace([path], before, after) - - -# https://github.com/googleapis/gapic-generator-python/issues/483 -paths = st._filter_files(st._expand_paths("google/cloud/**/*.py", ".")) -pattern = r"(?P\[(?P[\w.]+)\]\[(?P[\w.]+)\])" -expr = re.compile(pattern, flags=re.MULTILINE) -replaces = [] -for path in paths: - with path.open("r+") as fh: - content = fh.read() - for match in expr.finditer(content): - before = match.groupdict()["full"].replace("[", "\[").replace("]", "\]") - after = match.groupdict()["first"] - after = f"``{after}``" - replaces.append((path, before, after)) - -for path, before, after in replaces: - s.replace([path], before, after) - - -s.replace("google/cloud/**/*.py", "\]\(\n\n\s*", "](") - -s.replace("google/cloud/**/*.py", "\s*//\n\s*", "") - -s.replace("google/cloud/**/*.py", "https:[\n]*\s*//", "https://") - -s.replace("google/cloud/**/*.py", "[\n]*\s*//\s*/", "/") - # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/constraints-3.9.txt b/testing/constraints-3.9.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py index 1597014605..c59b335074 100644 --- a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py @@ -102,7 +102,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [DatasetServiceClient, DatasetServiceAsyncClient,], + "client_class", [DatasetServiceClient, DatasetServiceAsyncClient,] ) def test_dataset_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() @@ -119,7 +119,7 @@ def test_dataset_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - "client_class", [DatasetServiceClient, DatasetServiceAsyncClient,], + "client_class", [DatasetServiceClient, DatasetServiceAsyncClient,] ) def test_dataset_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() diff --git a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py index bf351a3978..90d41c04c0 100644 --- a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -99,7 +99,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [EndpointServiceClient, EndpointServiceAsyncClient,], + "client_class", [EndpointServiceClient, EndpointServiceAsyncClient,] ) def test_endpoint_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() @@ -116,7 +116,7 @@ def test_endpoint_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - "client_class", [EndpointServiceClient, EndpointServiceAsyncClient,], + "client_class", [EndpointServiceClient, EndpointServiceAsyncClient,] ) def test_endpoint_service_client_from_service_account_file(client_class): creds = 
credentials.AnonymousCredentials() diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index 50d1339247..ea8d1d502b 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -114,9 +114,7 @@ def test__get_default_mtls_endpoint(): assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi -@pytest.mark.parametrize( - "client_class", [JobServiceClient, JobServiceAsyncClient,], -) +@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient,]) def test_job_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( @@ -131,9 +129,7 @@ def test_job_service_client_from_service_account_info(client_class): assert client.transport._host == "aiplatform.googleapis.com:443" -@pytest.mark.parametrize( - "client_class", [JobServiceClient, JobServiceAsyncClient,], -) +@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient,]) def test_job_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index 04bc7c392a..d1b0b51231 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -93,7 +93,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,], + "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,] ) def test_migration_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() @@ -110,7 +110,7 @@ def test_migration_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - 
"client_class", [MigrationServiceClient, MigrationServiceAsyncClient,], + "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,] ) def test_migration_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -1574,21 +1574,19 @@ def test_parse_dataset_path(): def test_dataset_path(): project = "squid" - location = "clam" - dataset = "whelk" + dataset = "clam" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "octopus", - "location": "oyster", - "dataset": "nudibranch", + "project": "whelk", + "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1598,19 +1596,21 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "cuttlefish" - dataset = "mussel" + project = "oyster" + location = "nudibranch" + dataset = "cuttlefish" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", + "project": "mussel", + "location": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_model_service.py b/tests/unit/gapic/aiplatform_v1/test_model_service.py index 15e4bad05d..f74aea2dea 100644 
--- a/tests/unit/gapic/aiplatform_v1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_model_service.py @@ -97,9 +97,7 @@ def test__get_default_mtls_endpoint(): assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi -@pytest.mark.parametrize( - "client_class", [ModelServiceClient, ModelServiceAsyncClient,], -) +@pytest.mark.parametrize("client_class", [ModelServiceClient, ModelServiceAsyncClient,]) def test_model_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( @@ -114,9 +112,7 @@ def test_model_service_client_from_service_account_info(client_class): assert client.transport._host == "aiplatform.googleapis.com:443" -@pytest.mark.parametrize( - "client_class", [ModelServiceClient, ModelServiceAsyncClient,], -) +@pytest.mark.parametrize("client_class", [ModelServiceClient, ModelServiceAsyncClient,]) def test_model_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( diff --git a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py index 21e6d0d44f..d0079aae4d 100644 --- a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py @@ -105,7 +105,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [PipelineServiceClient, PipelineServiceAsyncClient,], + "client_class", [PipelineServiceClient, PipelineServiceAsyncClient,] ) def test_pipeline_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() @@ -122,7 +122,7 @@ def test_pipeline_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - "client_class", [PipelineServiceClient, PipelineServiceAsyncClient,], + "client_class", [PipelineServiceClient, PipelineServiceAsyncClient,] ) def 
test_pipeline_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() diff --git a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py index d5099832f0..339187f22a 100644 --- a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py @@ -98,7 +98,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient,], + "client_class", [SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient,] ) def test_specialist_pool_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() @@ -115,7 +115,7 @@ def test_specialist_pool_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - "client_class", [SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient,], + "client_class", [SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient,] ) def test_specialist_pool_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py index 6042fa6f42..5a3818dc9d 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py @@ -104,7 +104,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [DatasetServiceClient, DatasetServiceAsyncClient,], + "client_class", [DatasetServiceClient, DatasetServiceAsyncClient,] ) def test_dataset_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() @@ -121,7 +121,7 @@ def test_dataset_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - 
"client_class", [DatasetServiceClient, DatasetServiceAsyncClient,], + "client_class", [DatasetServiceClient, DatasetServiceAsyncClient,] ) def test_dataset_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py index bda98b26a5..a8ee297c20 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py @@ -104,7 +104,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [EndpointServiceClient, EndpointServiceAsyncClient,], + "client_class", [EndpointServiceClient, EndpointServiceAsyncClient,] ) def test_endpoint_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() @@ -121,7 +121,7 @@ def test_endpoint_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - "client_class", [EndpointServiceClient, EndpointServiceAsyncClient,], + "client_class", [EndpointServiceClient, EndpointServiceAsyncClient,] ) def test_endpoint_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py new file mode 100644 index 0000000000..db9a7d5367 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py @@ -0,0 +1,1501 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import ( + FeaturestoreOnlineServingServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import ( + FeaturestoreOnlineServingServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import ( + transports, +) +from google.cloud.aiplatform_v1beta1.types import feature_selector +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service +from google.oauth2 import service_account + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None + ) + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint( + api_mtls_endpoint + ) + == api_mtls_endpoint + ) + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint( + sandbox_endpoint + ) + == sandbox_mtls_endpoint + ) + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint( + sandbox_mtls_endpoint + ) + == sandbox_mtls_endpoint + ) + assert ( + FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", + [ + FeaturestoreOnlineServingServiceClient, + FeaturestoreOnlineServingServiceAsyncClient, + ], +) +def test_featurestore_online_serving_service_client_from_service_account_info( + client_class, +): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", + [ + FeaturestoreOnlineServingServiceClient, + FeaturestoreOnlineServingServiceAsyncClient, + ], 
+) +def test_featurestore_online_serving_service_client_from_service_account_file( + client_class, +): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_featurestore_online_serving_service_client_get_transport_class(): + transport = FeaturestoreOnlineServingServiceClient.get_transport_class() + available_transports = [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + ] + assert transport in available_transports + + transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc") + assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreOnlineServingServiceClient, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreOnlineServingServiceAsyncClient, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + FeaturestoreOnlineServingServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreOnlineServingServiceClient), +) +@mock.patch.object( + FeaturestoreOnlineServingServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient), +) +def test_featurestore_online_serving_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object( + FeaturestoreOnlineServingServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + FeaturestoreOnlineServingServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + FeaturestoreOnlineServingServiceClient, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + "grpc", + "true", + ), + ( + FeaturestoreOnlineServingServiceAsyncClient, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + FeaturestoreOnlineServingServiceClient, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + "grpc", + "false", + ), + ( + 
FeaturestoreOnlineServingServiceAsyncClient, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + FeaturestoreOnlineServingServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreOnlineServingServiceClient), +) +@mock.patch.object( + FeaturestoreOnlineServingServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_featurestore_online_serving_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+    with mock.patch.dict(
+        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+    ):
+        with mock.patch.object(transport_class, "__init__") as patched:
+            with mock.patch(
+                "google.auth.transport.mtls.has_default_client_cert_source",
+                return_value=True,
+            ):
+                with mock.patch(
+                    "google.auth.transport.mtls.default_client_cert_source",
+                    return_value=client_cert_source_callback,
+                ):
+                    if use_client_cert_env == "false":
+                        expected_host = client_class.DEFAULT_ENDPOINT
+                        expected_client_cert_source = None
+                    else:
+                        expected_host = client_class.DEFAULT_MTLS_ENDPOINT
+                        expected_client_cert_source = client_cert_source_callback
+
+                    patched.return_value = None
+                    client = client_class()
+                    patched.assert_called_once_with(
+                        credentials=None,
+                        credentials_file=None,
+                        host=expected_host,
+                        scopes=None,
+                        client_cert_source_for_mtls=expected_client_cert_source,
+                        quota_project_id=None,
+                        client_info=transports.base.DEFAULT_CLIENT_INFO,
+                    )
+
+    # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreOnlineServingServiceClient, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreOnlineServingServiceAsyncClient, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_featurestore_online_serving_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreOnlineServingServiceClient, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreOnlineServingServiceAsyncClient, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_featurestore_online_serving_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_featurestore_online_serving_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = FeaturestoreOnlineServingServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + 
host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_read_feature_values( + transport: str = "grpc", + request_type=featurestore_online_service.ReadFeatureValuesRequest, +): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + + response = client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) + + +def test_read_feature_values_from_dict(): + test_read_feature_values(request_type=dict) + + +def test_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + client.read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_read_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_online_service.ReadFeatureValuesRequest, +): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_online_service.ReadFeatureValuesResponse() + ) + + response = await client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) + + +@pytest.mark.asyncio +async def test_read_feature_values_async_from_dict(): + await test_read_feature_values_async(request_type=dict) + + +def test_read_feature_values_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_online_service.ReadFeatureValuesRequest() + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + + client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_read_feature_values_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.ReadFeatureValuesRequest() + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_online_service.ReadFeatureValuesResponse() + ) + + await client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +def test_read_feature_values_flattened(): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].entity_type == "entity_type_value" + + +def test_read_feature_values_flattened_error(): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_feature_values( + featurestore_online_service.ReadFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +@pytest.mark.asyncio +async def test_read_feature_values_flattened_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_online_service.ReadFeatureValuesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].entity_type == "entity_type_value" + + +@pytest.mark.asyncio +async def test_read_feature_values_flattened_error_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_feature_values( + featurestore_online_service.ReadFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +def test_streaming_read_feature_values( + transport: str = "grpc", + request_type=featurestore_online_service.StreamingReadFeatureValuesRequest, +): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter( + [featurestore_online_service.ReadFeatureValuesResponse()] + ) + + response = client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert ( + args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + ) + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance( + message, featurestore_online_service.ReadFeatureValuesResponse + ) + + +def test_streaming_read_feature_values_from_dict(): + test_streaming_read_feature_values(request_type=dict) + + +def test_streaming_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + client.streaming_read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert ( + args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + ) + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_online_service.StreamingReadFeatureValuesRequest, +): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[featurestore_online_service.ReadFeatureValuesResponse()] + ) + + response = await client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert ( + args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + ) + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_async_from_dict(): + await test_streaming_read_feature_values_async(request_type=dict) + + +def test_streaming_read_feature_values_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.StreamingReadFeatureValuesRequest() + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + call.return_value = iter( + [featurestore_online_service.ReadFeatureValuesResponse()] + ) + + client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.StreamingReadFeatureValuesRequest() + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[featurestore_online_service.ReadFeatureValuesResponse()] + ) + + await client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +def test_streaming_read_feature_values_flattened(): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter( + [featurestore_online_service.ReadFeatureValuesResponse()] + ) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.streaming_read_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].entity_type == "entity_type_value" + + +def test_streaming_read_feature_values_flattened_error(): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.streaming_read_feature_values( + featurestore_online_service.StreamingReadFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_flattened_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter( + [featurestore_online_service.ReadFeatureValuesResponse()] + ) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.streaming_read_feature_values( + entity_type="entity_type_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].entity_type == "entity_type_value" + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_flattened_error_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.streaming_read_feature_values( + featurestore_online_service.StreamingReadFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = FeaturestoreOnlineServingServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, transports.FeaturestoreOnlineServingServiceGrpcTransport, + ) + + +def test_featurestore_online_serving_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_featurestore_online_serving_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "read_feature_values", + "streaming_read_feature_values", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_featurestore_online_serving_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_featurestore_online_serving_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreOnlineServingServiceTransport() + adc.assert_called_once() + + +def test_featurestore_online_serving_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + FeaturestoreOnlineServingServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_featurestore_online_serving_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.FeaturestoreOnlineServingServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_featurestore_online_serving_service_host_no_port(): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_featurestore_online_serving_service_host_with_port(): + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:8000" + + +def test_featurestore_online_serving_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if 
provided. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_featurestore_online_serving_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + 
certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_online_serving_service_transport_channel_mtls_with_adc( + transport_class, +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + 
+def test_entity_type_path(): + project = "squid" + location = "clam" + featurestore = "whelk" + entity_type = "octopus" + + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + ) + actual = FeaturestoreOnlineServingServiceClient.entity_type_path( + project, location, featurestore, entity_type + ) + assert expected == actual + + +def test_parse_entity_type_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "featurestore": "cuttlefish", + "entity_type": "mussel", + } + path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "winkle" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path( + billing_account + ) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = FeaturestoreOnlineServingServiceClient.common_billing_account_path( + **expected + ) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path( + path + ) + assert expected == actual + + +def test_common_folder_path(): + folder = "scallop" + + expected = "folders/{folder}".format(folder=folder,) + actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "squid" + + expected = "organizations/{organization}".format(organization=organization,) + actual = FeaturestoreOnlineServingServiceClient.common_organization_path( + organization + ) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "whelk" + + expected = "projects/{project}".format(project=project,) + actual = FeaturestoreOnlineServingServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = FeaturestoreOnlineServingServiceClient.common_location_path( + project, location + ) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.FeaturestoreOnlineServingServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = FeaturestoreOnlineServingServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.FeaturestoreOnlineServingServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py new file mode 100644 index 0000000000..cffb5d0ade --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -0,0 +1,6319 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.featurestore_service import ( + FeaturestoreServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.featurestore_service import ( + FeaturestoreServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.services.featurestore_service import transports +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import feature_selector +from google.cloud.aiplatform_v1beta1.types import featurestore +from 
google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert FeaturestoreServiceClient._get_default_mtls_endpoint(None) is None + assert ( + FeaturestoreServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + FeaturestoreServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + 
FeaturestoreServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [FeaturestoreServiceClient, FeaturestoreServiceAsyncClient,] +) +def test_featurestore_service_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [FeaturestoreServiceClient, FeaturestoreServiceAsyncClient,] +) +def test_featurestore_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_featurestore_service_client_get_transport_class(): + transport = FeaturestoreServiceClient.get_transport_class() + available_transports = [ + transports.FeaturestoreServiceGrpcTransport, + ] + assert transport in available_transports + + transport = FeaturestoreServiceClient.get_transport_class("grpc") + assert transport == transports.FeaturestoreServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreServiceClient, + 
transports.FeaturestoreServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreServiceAsyncClient, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + FeaturestoreServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreServiceClient), +) +@mock.patch.object( + FeaturestoreServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreServiceAsyncClient), +) +def test_featurestore_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(FeaturestoreServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(FeaturestoreServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + FeaturestoreServiceClient, + transports.FeaturestoreServiceGrpcTransport, + "grpc", + "true", + ), + ( + FeaturestoreServiceAsyncClient, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + FeaturestoreServiceClient, + transports.FeaturestoreServiceGrpcTransport, + "grpc", + "false", + ), + ( + FeaturestoreServiceAsyncClient, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + FeaturestoreServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreServiceClient), +) +@mock.patch.object( + FeaturestoreServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(FeaturestoreServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_featurestore_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. 
Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreServiceClient, + transports.FeaturestoreServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreServiceAsyncClient, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_featurestore_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + FeaturestoreServiceClient, + transports.FeaturestoreServiceGrpcTransport, + "grpc", + ), + ( + FeaturestoreServiceAsyncClient, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_featurestore_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_featurestore_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = FeaturestoreServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_featurestore( + transport: str = "grpc", request_type=featurestore_service.CreateFeaturestoreRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_featurestore_from_dict(): + test_create_featurestore(request_type=dict) + + +def test_create_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + client.create_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_create_featurestore_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.CreateFeaturestoreRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_featurestore_async_from_dict(): + await test_create_featurestore_async(request_type=dict) + + +def test_create_featurestore_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeaturestoreRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeaturestoreRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_featurestore_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_featurestore( + parent="parent_value", + featurestore=gca_featurestore.Featurestore(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].featurestore == gca_featurestore.Featurestore(name="name_value") + + +def test_create_featurestore_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_featurestore( + featurestore_service.CreateFeaturestoreRequest(), + parent="parent_value", + featurestore=gca_featurestore.Featurestore(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_featurestore( + parent="parent_value", + featurestore=gca_featurestore.Featurestore(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].featurestore == gca_featurestore.Featurestore(name="name_value") + + +@pytest.mark.asyncio +async def test_create_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_featurestore( + featurestore_service.CreateFeaturestoreRequest(), + parent="parent_value", + featurestore=gca_featurestore.Featurestore(name="name_value"), + ) + + +def test_get_featurestore( + transport: str = "grpc", request_type=featurestore_service.GetFeaturestoreRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore( + name="name_value", + display_name="display_name_value", + etag="etag_value", + state=featurestore.Featurestore.State.STABLE, + ) + + response = client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.GetFeaturestoreRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, featurestore.Featurestore) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + assert response.state == featurestore.Featurestore.State.STABLE + + +def test_get_featurestore_from_dict(): + test_get_featurestore(request_type=dict) + + +def test_get_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + client.get_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.GetFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_get_featurestore_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.GetFeaturestoreRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore.Featurestore( + name="name_value", + display_name="display_name_value", + etag="etag_value", + state=featurestore.Featurestore.State.STABLE, + ) + ) + + response = await client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.GetFeaturestoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, featurestore.Featurestore) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + assert response.state == featurestore.Featurestore.State.STABLE + + +@pytest.mark.asyncio +async def test_get_featurestore_async_from_dict(): + await test_get_featurestore_async(request_type=dict) + + +def test_get_featurestore_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeaturestoreRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + call.return_value = featurestore.Featurestore() + + client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeaturestoreRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore.Featurestore() + ) + + await client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_featurestore_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_featurestore(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_featurestore_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_featurestore( + featurestore_service.GetFeaturestoreRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_featurestore), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore.Featurestore() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_featurestore(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_featurestore( + featurestore_service.GetFeaturestoreRequest(), name="name_value", + ) + + +def test_list_featurestores( + transport: str = "grpc", request_type=featurestore_service.ListFeaturestoresRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ListFeaturestoresRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListFeaturestoresPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_featurestores_from_dict(): + test_list_featurestores(request_type=dict) + + +def test_list_featurestores_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + client.list_featurestores() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ListFeaturestoresRequest() + + +@pytest.mark.asyncio +async def test_list_featurestores_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.ListFeaturestoresRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturestoresResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ListFeaturestoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturestoresAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_featurestores_async_from_dict(): + await test_list_featurestores_async(request_type=dict) + + +def test_list_featurestores_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturestoresRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + call.return_value = featurestore_service.ListFeaturestoresResponse() + + client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_featurestores_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ListFeaturestoresRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturestoresResponse() + ) + + await client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_featurestores_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_featurestores(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_featurestores_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_featurestores( + featurestore_service.ListFeaturestoresRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_featurestores_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturestoresResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_featurestores(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_featurestores_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_featurestores( + featurestore_service.ListFeaturestoresRequest(), parent="parent_value", + ) + + +def test_list_featurestores_pager(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token="abc", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], next_page_token="def", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[featurestore.Featurestore(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_featurestores(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, featurestore.Featurestore) for i in results) + + +def test_list_featurestores_pages(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token="abc", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], next_page_token="def", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[featurestore.Featurestore(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + pages = list(client.list_featurestores(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_featurestores_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token="abc", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], next_page_token="def", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[featurestore.Featurestore(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_featurestores(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, featurestore.Featurestore) for i in responses) + + +@pytest.mark.asyncio +async def test_list_featurestores_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token="abc", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], next_page_token="def", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[featurestore.Featurestore(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_featurestores(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_featurestore( + transport: str = "grpc", request_type=featurestore_service.UpdateFeaturestoreRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_update_featurestore_from_dict(): + test_update_featurestore(request_type=dict) + + +def test_update_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + client.update_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_update_featurestore_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.UpdateFeaturestoreRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_featurestore_async_from_dict(): + await test_update_featurestore_async(request_type=dict) + + +def test_update_featurestore_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeaturestoreRequest() + request.featurestore.name = "featurestore.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "featurestore.name=featurestore.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeaturestoreRequest() + request.featurestore.name = "featurestore.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "featurestore.name=featurestore.name/value", + ) in kw["metadata"] + + +def test_update_featurestore_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_featurestore( + featurestore=gca_featurestore.Featurestore(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].featurestore == gca_featurestore.Featurestore(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_featurestore_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_featurestore( + featurestore_service.UpdateFeaturestoreRequest(), + featurestore=gca_featurestore.Featurestore(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_featurestore( + featurestore=gca_featurestore.Featurestore(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].featurestore == gca_featurestore.Featurestore(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_featurestore( + featurestore_service.UpdateFeaturestoreRequest(), + featurestore=gca_featurestore.Featurestore(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_featurestore( + transport: str = "grpc", request_type=featurestore_service.DeleteFeaturestoreRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_featurestore_from_dict(): + test_delete_featurestore(request_type=dict) + + +def test_delete_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + client.delete_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_delete_featurestore_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.DeleteFeaturestoreRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_featurestore_async_from_dict(): + await test_delete_featurestore_async(request_type=dict) + + +def test_delete_featurestore_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeaturestoreRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeaturestoreRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_featurestore_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_featurestore(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_featurestore_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_featurestore( + featurestore_service.DeleteFeaturestoreRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_featurestore(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_featurestore( + featurestore_service.DeleteFeaturestoreRequest(), name="name_value", + ) + + +def test_create_entity_type( + transport: str = "grpc", request_type=featurestore_service.CreateEntityTypeRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_entity_type_from_dict(): + test_create_entity_type(request_type=dict) + + +def test_create_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + client.create_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_create_entity_type_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.CreateEntityTypeRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_entity_type_async_from_dict(): + await test_create_entity_type_async(request_type=dict) + + +def test_create_entity_type_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.CreateEntityTypeRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateEntityTypeRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_entity_type_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_entity_type( + parent="parent_value", + entity_type=gca_entity_type.EntityType(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].entity_type == gca_entity_type.EntityType(name="name_value") + + +def test_create_entity_type_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_entity_type( + featurestore_service.CreateEntityTypeRequest(), + parent="parent_value", + entity_type=gca_entity_type.EntityType(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_entity_type( + parent="parent_value", + entity_type=gca_entity_type.EntityType(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].entity_type == gca_entity_type.EntityType(name="name_value") + + +@pytest.mark.asyncio +async def test_create_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_entity_type( + featurestore_service.CreateEntityTypeRequest(), + parent="parent_value", + entity_type=gca_entity_type.EntityType(name="name_value"), + ) + + +def test_get_entity_type( + transport: str = "grpc", request_type=featurestore_service.GetEntityTypeRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType( + name="name_value", description="description_value", etag="etag_value", + ) + + response = client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.GetEntityTypeRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, entity_type.EntityType) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + +def test_get_entity_type_from_dict(): + test_get_entity_type(request_type=dict) + + +def test_get_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + client.get_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.GetEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_get_entity_type_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.GetEntityTypeRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + entity_type.EntityType( + name="name_value", description="description_value", etag="etag_value", + ) + ) + + response = await client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.GetEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, entity_type.EntityType) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_get_entity_type_async_from_dict(): + await test_get_entity_type_async(request_type=dict) + + +def test_get_entity_type_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetEntityTypeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + call.return_value = entity_type.EntityType() + + client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetEntityTypeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + entity_type.EntityType() + ) + + await client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_entity_type_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_entity_type(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_entity_type_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_entity_type( + featurestore_service.GetEntityTypeRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + entity_type.EntityType() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_entity_type(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_entity_type( + featurestore_service.GetEntityTypeRequest(), name="name_value", + ) + + +def test_list_entity_types( + transport: str = "grpc", request_type=featurestore_service.ListEntityTypesRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ListEntityTypesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListEntityTypesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_entity_types_from_dict(): + test_list_entity_types(request_type=dict) + + +def test_list_entity_types_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + client.list_entity_types() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ListEntityTypesRequest() + + +@pytest.mark.asyncio +async def test_list_entity_types_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.ListEntityTypesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListEntityTypesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ListEntityTypesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEntityTypesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_entity_types_async_from_dict(): + await test_list_entity_types_async(request_type=dict) + + +def test_list_entity_types_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListEntityTypesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + call.return_value = featurestore_service.ListEntityTypesResponse() + + client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_entity_types_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListEntityTypesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListEntityTypesResponse() + ) + + await client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_entity_types_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_entity_types(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_entity_types_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_entity_types( + featurestore_service.ListEntityTypesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_entity_types_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListEntityTypesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_entity_types(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_entity_types_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_entity_types( + featurestore_service.ListEntityTypesRequest(), parent="parent_value", + ) + + +def test_list_entity_types_pager(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token="abc", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], next_page_token="def", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(),], next_page_token="ghi", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(), entity_type.EntityType(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_entity_types(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, entity_type.EntityType) for i in results) + + +def test_list_entity_types_pages(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token="abc", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], next_page_token="def", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(),], next_page_token="ghi", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(), entity_type.EntityType(),], + ), + RuntimeError, + ) + pages = list(client.list_entity_types(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_entity_types_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token="abc", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], next_page_token="def", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(),], next_page_token="ghi", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(), entity_type.EntityType(),], + ), + RuntimeError, + ) + async_pager = await client.list_entity_types(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, entity_type.EntityType) for i in responses) + + +@pytest.mark.asyncio +async def test_list_entity_types_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token="abc", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], next_page_token="def", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(),], next_page_token="ghi", + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[entity_type.EntityType(), entity_type.EntityType(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_entity_types(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_entity_type( + transport: str = "grpc", request_type=featurestore_service.UpdateEntityTypeRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType( + name="name_value", description="description_value", etag="etag_value", + ) + + response = client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, gca_entity_type.EntityType) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + +def test_update_entity_type_from_dict(): + test_update_entity_type(request_type=dict) + + +def test_update_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + client.update_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_update_entity_type_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.UpdateEntityTypeRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_entity_type.EntityType( + name="name_value", description="description_value", etag="etag_value", + ) + ) + + response = await client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_entity_type.EntityType) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_update_entity_type_async_from_dict(): + await test_update_entity_type_async(request_type=dict) + + +def test_update_entity_type_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateEntityTypeRequest() + request.entity_type.name = "entity_type.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + call.return_value = gca_entity_type.EntityType() + + client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type.name=entity_type.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateEntityTypeRequest() + request.entity_type.name = "entity_type.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_entity_type.EntityType() + ) + + await client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type.name=entity_type.name/value",) in kw[ + "metadata" + ] + + +def test_update_entity_type_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_entity_type( + entity_type=gca_entity_type.EntityType(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].entity_type == gca_entity_type.EntityType(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_entity_type_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_entity_type( + featurestore_service.UpdateEntityTypeRequest(), + entity_type=gca_entity_type.EntityType(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_entity_type.EntityType() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_entity_type( + entity_type=gca_entity_type.EntityType(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].entity_type == gca_entity_type.EntityType(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_entity_type( + featurestore_service.UpdateEntityTypeRequest(), + entity_type=gca_entity_type.EntityType(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_entity_type( + transport: str = "grpc", request_type=featurestore_service.DeleteEntityTypeRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_entity_type_from_dict(): + test_delete_entity_type(request_type=dict) + + +def test_delete_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + client.delete_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_delete_entity_type_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.DeleteEntityTypeRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_entity_type_async_from_dict(): + await test_delete_entity_type_async(request_type=dict) + + +def test_delete_entity_type_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteEntityTypeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteEntityTypeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_entity_type_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_entity_type(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_entity_type_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_entity_type( + featurestore_service.DeleteEntityTypeRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_entity_type(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_entity_type( + featurestore_service.DeleteEntityTypeRequest(), name="name_value", + ) + + +def test_create_feature( + transport: str = "grpc", request_type=featurestore_service.CreateFeatureRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.CreateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_feature_from_dict(): + test_create_feature(request_type=dict) + + +def test_create_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + client.create_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.CreateFeatureRequest() + + +@pytest.mark.asyncio +async def test_create_feature_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.CreateFeatureRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.CreateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_feature_async_from_dict(): + await test_create_feature_async(request_type=dict) + + +def test_create_feature_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.CreateFeatureRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeatureRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_feature_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_feature( + parent="parent_value", feature=gca_feature.Feature(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].feature == gca_feature.Feature(name="name_value") + + +def test_create_feature_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_feature( + featurestore_service.CreateFeatureRequest(), + parent="parent_value", + feature=gca_feature.Feature(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_feature( + parent="parent_value", feature=gca_feature.Feature(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].feature == gca_feature.Feature(name="name_value") + + +@pytest.mark.asyncio +async def test_create_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_feature( + featurestore_service.CreateFeatureRequest(), + parent="parent_value", + feature=gca_feature.Feature(name="name_value"), + ) + + +def test_batch_create_features( + transport: str = "grpc", + request_type=featurestore_service.BatchCreateFeaturesRequest, +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_batch_create_features_from_dict(): + test_batch_create_features(request_type=dict) + + +def test_batch_create_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + client.batch_create_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + +@pytest.mark.asyncio +async def test_batch_create_features_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.BatchCreateFeaturesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_create_features_async_from_dict(): + await test_batch_create_features_async(request_type=dict) + + +def test_batch_create_features_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchCreateFeaturesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_create_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchCreateFeaturesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_batch_create_features_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_create_features( + parent="parent_value", + requests=[featurestore_service.CreateFeatureRequest(parent="parent_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].requests == [ + featurestore_service.CreateFeatureRequest(parent="parent_value") + ] + + +def test_batch_create_features_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_create_features( + featurestore_service.BatchCreateFeaturesRequest(), + parent="parent_value", + requests=[featurestore_service.CreateFeatureRequest(parent="parent_value")], + ) + + +@pytest.mark.asyncio +async def test_batch_create_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_create_features), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_create_features( + parent="parent_value", + requests=[featurestore_service.CreateFeatureRequest(parent="parent_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].requests == [ + featurestore_service.CreateFeatureRequest(parent="parent_value") + ] + + +@pytest.mark.asyncio +async def test_batch_create_features_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_create_features( + featurestore_service.BatchCreateFeaturesRequest(), + parent="parent_value", + requests=[featurestore_service.CreateFeatureRequest(parent="parent_value")], + ) + + +def test_get_feature( + transport: str = "grpc", request_type=featurestore_service.GetFeatureRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature( + name="name_value", + description="description_value", + value_type=feature.Feature.ValueType.BOOL, + etag="etag_value", + ) + + response = client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.GetFeatureRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, feature.Feature) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.value_type == feature.Feature.ValueType.BOOL + + assert response.etag == "etag_value" + + +def test_get_feature_from_dict(): + test_get_feature(request_type=dict) + + +def test_get_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + client.get_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.GetFeatureRequest() + + +@pytest.mark.asyncio +async def test_get_feature_async( + transport: str = "grpc_asyncio", request_type=featurestore_service.GetFeatureRequest +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + feature.Feature( + name="name_value", + description="description_value", + value_type=feature.Feature.ValueType.BOOL, + etag="etag_value", + ) + ) + + response = await client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.GetFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, feature.Feature) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.value_type == feature.Feature.ValueType.BOOL + + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_get_feature_async_from_dict(): + await test_get_feature_async(request_type=dict) + + +def test_get_feature_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeatureRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + call.return_value = feature.Feature() + + client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeatureRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) + + await client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_feature_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_feature(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_feature_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_feature( + featurestore_service.GetFeatureRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_feature(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_feature( + featurestore_service.GetFeatureRequest(), name="name_value", + ) + + +def test_list_features( + transport: str = "grpc", request_type=featurestore_service.ListFeaturesRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ListFeaturesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListFeaturesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_features_from_dict(): + test_list_features(request_type=dict) + + +def test_list_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_features), "__call__") as call: + client.list_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ListFeaturesRequest() + + +@pytest.mark.asyncio +async def test_list_features_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.ListFeaturesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ListFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_features_async_from_dict(): + await test_list_features_async(request_type=dict) + + +def test_list_features_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ListFeaturesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + call.return_value = featurestore_service.ListFeaturesResponse() + + client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturesResponse() + ) + + await client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_features_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_features(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_features_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_features( + featurestore_service.ListFeaturesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.ListFeaturesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_features(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_features_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_features( + featurestore_service.ListFeaturesRequest(), parent="parent_value", + ) + + +def test_list_features_pager(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.ListFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_features(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, feature.Feature) for i in results) + + +def test_list_features_pages(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_features), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.ListFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + pages = list(client.list_features(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_features_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.ListFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + async_pager = await client.list_features(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, feature.Feature) for i in responses) + + +@pytest.mark.asyncio +async def test_list_features_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.ListFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.ListFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_features(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_feature( + transport: str = "grpc", request_type=featurestore_service.UpdateFeatureRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature( + name="name_value", + description="description_value", + value_type=gca_feature.Feature.ValueType.BOOL, + etag="etag_value", + ) + + response = client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.UpdateFeatureRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, gca_feature.Feature) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.value_type == gca_feature.Feature.ValueType.BOOL + + assert response.etag == "etag_value" + + +def test_update_feature_from_dict(): + test_update_feature(request_type=dict) + + +def test_update_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + client.update_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.UpdateFeatureRequest() + + +@pytest.mark.asyncio +async def test_update_feature_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.UpdateFeatureRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_feature.Feature( + name="name_value", + description="description_value", + value_type=gca_feature.Feature.ValueType.BOOL, + etag="etag_value", + ) + ) + + response = await client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.UpdateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_feature.Feature) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.value_type == gca_feature.Feature.ValueType.BOOL + + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_update_feature_async_from_dict(): + await test_update_feature_async(request_type=dict) + + +def test_update_feature_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeatureRequest() + request.feature.name = "feature.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + call.return_value = gca_feature.Feature() + + client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "feature.name=feature.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeatureRequest() + request.feature.name = "feature.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) + + await client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "feature.name=feature.name/value",) in kw[ + "metadata" + ] + + +def test_update_feature_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_feature( + feature=gca_feature.Feature(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].feature == gca_feature.Feature(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_feature_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_feature( + featurestore_service.UpdateFeatureRequest(), + feature=gca_feature.Feature(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_feature( + feature=gca_feature.Feature(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].feature == gca_feature.Feature(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_feature( + featurestore_service.UpdateFeatureRequest(), + feature=gca_feature.Feature(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_feature( + transport: str = "grpc", request_type=featurestore_service.DeleteFeatureRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.DeleteFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_feature_from_dict(): + test_delete_feature(request_type=dict) + + +def test_delete_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + client.delete_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.DeleteFeatureRequest() + + +@pytest.mark.asyncio +async def test_delete_feature_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.DeleteFeatureRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.DeleteFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_feature_async_from_dict(): + await test_delete_feature_async(request_type=dict) + + +def test_delete_feature_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeatureRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeatureRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_feature_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_feature(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_feature_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_feature( + featurestore_service.DeleteFeatureRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_feature), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_feature(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_feature( + featurestore_service.DeleteFeatureRequest(), name="name_value", + ) + + +def test_import_feature_values( + transport: str = "grpc", + request_type=featurestore_service.ImportFeatureValuesRequest, +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_feature_values_from_dict(): + test_import_feature_values(request_type=dict) + + +def test_import_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + client.import_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_import_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.ImportFeatureValuesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_feature_values_async_from_dict(): + await test_import_feature_values_async(request_type=dict) + + +def test_import_feature_values_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = featurestore_service.ImportFeatureValuesRequest() + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_import_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ImportFeatureValuesRequest() + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +def test_import_feature_values_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].entity_type == "entity_type_value" + + +def test_import_feature_values_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_feature_values( + featurestore_service.ImportFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +@pytest.mark.asyncio +async def test_import_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].entity_type == "entity_type_value" + + +@pytest.mark.asyncio +async def test_import_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_feature_values( + featurestore_service.ImportFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +def test_batch_read_feature_values( + transport: str = "grpc", + request_type=featurestore_service.BatchReadFeatureValuesRequest, +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_read_feature_values_from_dict(): + test_batch_read_feature_values(request_type=dict) + + +def test_batch_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + client.batch_read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.BatchReadFeatureValuesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_async_from_dict(): + await test_batch_read_feature_values_async(request_type=dict) + + +def test_batch_read_feature_values_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchReadFeatureValuesRequest() + request.featurestore = "featurestore/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "featurestore=featurestore/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchReadFeatureValuesRequest() + request.featurestore = "featurestore/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "featurestore=featurestore/value",) in kw[ + "metadata" + ] + + +def test_batch_read_feature_values_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_read_feature_values(featurestore="featurestore_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].featurestore == "featurestore_value" + + +def test_batch_read_feature_values_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.batch_read_feature_values( + featurestore_service.BatchReadFeatureValuesRequest(), + featurestore="featurestore_value", + ) + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_read_feature_values( + featurestore="featurestore_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].featurestore == "featurestore_value" + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.batch_read_feature_values( + featurestore_service.BatchReadFeatureValuesRequest(), + featurestore="featurestore_value", + ) + + +def test_export_feature_values( + transport: str = "grpc", + request_type=featurestore_service.ExportFeatureValuesRequest, +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_feature_values_from_dict(): + test_export_feature_values(request_type=dict) + + +def test_export_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + client.export_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_export_feature_values_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.ExportFeatureValuesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_feature_values_async_from_dict(): + await test_export_feature_values_async(request_type=dict) + + +def test_export_feature_values_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ExportFeatureValuesRequest() + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_export_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ExportFeatureValuesRequest() + request.entity_type = "entity_type/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"] + + +def test_export_feature_values_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].entity_type == "entity_type_value" + + +def test_export_feature_values_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_feature_values( + featurestore_service.ExportFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +@pytest.mark.asyncio +async def test_export_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_feature_values(entity_type="entity_type_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].entity_type == "entity_type_value" + + +@pytest.mark.asyncio +async def test_export_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_feature_values( + featurestore_service.ExportFeatureValuesRequest(), + entity_type="entity_type_value", + ) + + +def test_search_features( + transport: str = "grpc", request_type=featurestore_service.SearchFeaturesRequest +): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.SearchFeaturesResponse( + next_page_token="next_page_token_value", + ) + + response = client.search_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.SearchFeaturesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.SearchFeaturesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_search_features_from_dict(): + test_search_features(request_type=dict) + + +def test_search_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + client.search_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.SearchFeaturesRequest() + + +@pytest.mark.asyncio +async def test_search_features_async( + transport: str = "grpc_asyncio", + request_type=featurestore_service.SearchFeaturesRequest, +): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.SearchFeaturesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.search_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == featurestore_service.SearchFeaturesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.SearchFeaturesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_search_features_async_from_dict(): + await test_search_features_async(request_type=dict) + + +def test_search_features_field_headers(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.SearchFeaturesRequest() + request.location = "location/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + call.return_value = featurestore_service.SearchFeaturesResponse() + + client.search_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "location=location/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_search_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.SearchFeaturesRequest() + request.location = "location/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + featurestore_service.SearchFeaturesResponse() + ) + + await client.search_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "location=location/value",) in kw["metadata"] + + +def test_search_features_flattened(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.SearchFeaturesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_features(location="location_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].location == "location_value" + + +def test_search_features_flattened_error(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.search_features( + featurestore_service.SearchFeaturesRequest(), location="location_value", + ) + + +@pytest.mark.asyncio +async def test_search_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = featurestore_service.SearchFeaturesResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            featurestore_service.SearchFeaturesResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.search_features(location="location_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].location == "location_value"
+
+
+@pytest.mark.asyncio
+async def test_search_features_flattened_error_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.search_features(
+            featurestore_service.SearchFeaturesRequest(), location="location_value",
+        )
+
+
+def test_search_features_pager():
+    client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.search_features), "__call__") as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.SearchFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("location", ""),)), + ) + pager = client.search_features(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, feature.Feature) for i in results) + + +def test_search_features_pages(): + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.search_features), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.SearchFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + pages = list(client.search_features(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_search_features_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.SearchFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + async_pager = await client.search_features(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, feature.Feature) for i in responses) + + +@pytest.mark.asyncio +async def test_search_features_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(), feature.Feature(),], + next_page_token="abc", + ), + featurestore_service.SearchFeaturesResponse( + features=[], next_page_token="def", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(),], next_page_token="ghi", + ), + featurestore_service.SearchFeaturesResponse( + features=[feature.Feature(), feature.Feature(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.search_features(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.FeaturestoreServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = FeaturestoreServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = FeaturestoreServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.FeaturestoreServiceGrpcTransport,) + + +def test_featurestore_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.FeaturestoreServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_featurestore_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.FeaturestoreServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_featurestore", + "get_featurestore", + "list_featurestores", + "update_featurestore", + "delete_featurestore", + "create_entity_type", + "get_entity_type", + "list_entity_types", + "update_entity_type", + "delete_entity_type", + "create_feature", + "batch_create_features", + "get_feature", + "list_features", + "update_feature", + "delete_feature", + "import_feature_values", + "batch_read_feature_values", + "export_feature_values", + "search_features", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_featurestore_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_featurestore_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreServiceTransport() + adc.assert_called_once() + + +def test_featurestore_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + FeaturestoreServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_featurestore_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.FeaturestoreServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_featurestore_service_host_no_port(): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_featurestore_service_host_with_port(): + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:8000" + + +def test_featurestore_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.FeaturestoreServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_featurestore_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + 
grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_featurestore_service_grpc_lro_client(): + client = FeaturestoreServiceClient( + 
credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_featurestore_service_grpc_lro_async_client(): + client = FeaturestoreServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_entity_type_path(): + project = "squid" + location = "clam" + featurestore = "whelk" + entity_type = "octopus" + + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + ) + actual = FeaturestoreServiceClient.entity_type_path( + project, location, featurestore, entity_type + ) + assert expected == actual + + +def test_parse_entity_type_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "featurestore": "cuttlefish", + "entity_type": "mussel", + } + path = FeaturestoreServiceClient.entity_type_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_entity_type_path(path) + assert expected == actual + + +def test_feature_path(): + project = "winkle" + location = "nautilus" + featurestore = "scallop" + entity_type = "abalone" + feature = "squid" + + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format( + project=project, + location=location, + featurestore=featurestore, + entity_type=entity_type, + feature=feature, + ) + actual = FeaturestoreServiceClient.feature_path( + project, location, featurestore, entity_type, feature + ) + assert expected == actual + + +def test_parse_feature_path(): + expected = { + "project": "clam", + "location": "whelk", + "featurestore": "octopus", + "entity_type": "oyster", + "feature": "nudibranch", + } + path = FeaturestoreServiceClient.feature_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_feature_path(path) + assert expected == actual + + +def test_featurestore_path(): + project = "cuttlefish" + location = "mussel" + featurestore = "winkle" + + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}".format( + project=project, location=location, featurestore=featurestore, + ) + actual = FeaturestoreServiceClient.featurestore_path( + project, location, featurestore + ) + assert expected == actual + + +def test_parse_featurestore_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "featurestore": "abalone", + } + path = FeaturestoreServiceClient.featurestore_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_featurestore_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "squid" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = FeaturestoreServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = FeaturestoreServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + + expected = "folders/{folder}".format(folder=folder,) + actual = FeaturestoreServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = FeaturestoreServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + + expected = "organizations/{organization}".format(organization=organization,) + actual = FeaturestoreServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = FeaturestoreServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + + expected = "projects/{project}".format(project=project,) + actual = FeaturestoreServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = FeaturestoreServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = FeaturestoreServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = FeaturestoreServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.FeaturestoreServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = FeaturestoreServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.FeaturestoreServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = FeaturestoreServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py new file mode 100644 index 0000000000..9580632c24 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py @@ -0,0 +1,2904 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import ( + IndexEndpointServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import ( + IndexEndpointServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import transports +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert IndexEndpointServiceClient._get_default_mtls_endpoint(None) is None + assert ( + IndexEndpointServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + IndexEndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + IndexEndpointServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [IndexEndpointServiceClient, IndexEndpointServiceAsyncClient,] +) +def test_index_endpoint_service_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [IndexEndpointServiceClient, IndexEndpointServiceAsyncClient,] +) +def test_index_endpoint_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, 
"from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_index_endpoint_service_client_get_transport_class(): + transport = IndexEndpointServiceClient.get_transport_class() + available_transports = [ + transports.IndexEndpointServiceGrpcTransport, + ] + assert transport in available_transports + + transport = IndexEndpointServiceClient.get_transport_class("grpc") + assert transport == transports.IndexEndpointServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + IndexEndpointServiceClient, + transports.IndexEndpointServiceGrpcTransport, + "grpc", + ), + ( + IndexEndpointServiceAsyncClient, + transports.IndexEndpointServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + IndexEndpointServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(IndexEndpointServiceClient), +) +@mock.patch.object( + IndexEndpointServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(IndexEndpointServiceAsyncClient), +) +def test_index_endpoint_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(IndexEndpointServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. 
+ with mock.patch.object(IndexEndpointServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + IndexEndpointServiceClient, + transports.IndexEndpointServiceGrpcTransport, + "grpc", + "true", + ), + ( + IndexEndpointServiceAsyncClient, + transports.IndexEndpointServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + IndexEndpointServiceClient, + transports.IndexEndpointServiceGrpcTransport, + "grpc", + "false", + ), + ( + IndexEndpointServiceAsyncClient, + transports.IndexEndpointServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + IndexEndpointServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(IndexEndpointServiceClient), +) +@mock.patch.object( + IndexEndpointServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(IndexEndpointServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_index_endpoint_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch 
behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + IndexEndpointServiceClient, + transports.IndexEndpointServiceGrpcTransport, + "grpc", + ), + ( + IndexEndpointServiceAsyncClient, + transports.IndexEndpointServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_index_endpoint_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + IndexEndpointServiceClient, + transports.IndexEndpointServiceGrpcTransport, + "grpc", + ), + ( + IndexEndpointServiceAsyncClient, + transports.IndexEndpointServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_index_endpoint_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_index_endpoint_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = IndexEndpointServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_index_endpoint( + transport: str = "grpc", + request_type=index_endpoint_service.CreateIndexEndpointRequest, +): + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_index_endpoint_from_dict(): + test_create_index_endpoint(request_type=dict) + + +def test_create_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), "__call__" + ) as call: + client.create_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + + +@pytest.mark.asyncio +async def test_create_index_endpoint_async( + transport: str = "grpc_asyncio", + request_type=index_endpoint_service.CreateIndexEndpointRequest, +): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_index_endpoint_async_from_dict(): + await test_create_index_endpoint_async(request_type=dict) + + +def test_create_index_endpoint_field_headers(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.CreateIndexEndpointRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.CreateIndexEndpointRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_index_endpoint), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_index_endpoint_flattened(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_index_endpoint( + parent="parent_value", + index_endpoint=gca_index_endpoint.IndexEndpoint(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint( + name="name_value" + ) + + +def test_create_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_index_endpoint( + index_endpoint_service.CreateIndexEndpointRequest(), + parent="parent_value", + index_endpoint=gca_index_endpoint.IndexEndpoint(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_index_endpoint( + parent="parent_value", + index_endpoint=gca_index_endpoint.IndexEndpoint(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint( + name="name_value" + ) + + +@pytest.mark.asyncio +async def test_create_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_index_endpoint( + index_endpoint_service.CreateIndexEndpointRequest(), + parent="parent_value", + index_endpoint=gca_index_endpoint.IndexEndpoint(name="name_value"), + ) + + +def test_get_index_endpoint( + transport: str = "grpc", request_type=index_endpoint_service.GetIndexEndpointRequest +): + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint.IndexEndpoint( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + network="network_value", + ) + + response = client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.GetIndexEndpointRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, index_endpoint.IndexEndpoint) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + assert response.network == "network_value" + + +def test_get_index_endpoint_from_dict(): + test_get_index_endpoint(request_type=dict) + + +def test_get_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), "__call__" + ) as call: + client.get_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.GetIndexEndpointRequest() + + +@pytest.mark.asyncio +async def test_get_index_endpoint_async( + transport: str = "grpc_asyncio", + request_type=index_endpoint_service.GetIndexEndpointRequest, +): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + index_endpoint.IndexEndpoint( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + network="network_value", + ) + ) + + response = await client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.GetIndexEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, index_endpoint.IndexEndpoint) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + assert response.network == "network_value" + + +@pytest.mark.asyncio +async def test_get_index_endpoint_async_from_dict(): + await test_get_index_endpoint_async(request_type=dict) + + +def test_get_index_endpoint_field_headers(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.GetIndexEndpointRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), "__call__" + ) as call: + call.return_value = index_endpoint.IndexEndpoint() + + client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.GetIndexEndpointRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_index_endpoint), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + index_endpoint.IndexEndpoint() + ) + + await client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_index_endpoint_flattened(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint.IndexEndpoint() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_index_endpoint(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_index_endpoint( + index_endpoint_service.GetIndexEndpointRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint.IndexEndpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + index_endpoint.IndexEndpoint() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_index_endpoint(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_index_endpoint( + index_endpoint_service.GetIndexEndpointRequest(), name="name_value", + ) + + +def test_list_index_endpoints( + transport: str = "grpc", + request_type=index_endpoint_service.ListIndexEndpointsRequest, +): + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint_service.ListIndexEndpointsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListIndexEndpointsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_index_endpoints_from_dict(): + test_list_index_endpoints(request_type=dict) + + +def test_list_index_endpoints_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), "__call__" + ) as call: + client.list_index_endpoints() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() + + +@pytest.mark.asyncio +async def test_list_index_endpoints_async( + transport: str = "grpc_asyncio", + request_type=index_endpoint_service.ListIndexEndpointsRequest, +): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + index_endpoint_service.ListIndexEndpointsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListIndexEndpointsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_index_endpoints_async_from_dict(): + await test_list_index_endpoints_async(request_type=dict) + + +def test_list_index_endpoints_field_headers(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.ListIndexEndpointsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), "__call__" + ) as call: + call.return_value = index_endpoint_service.ListIndexEndpointsResponse() + + client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_index_endpoints_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = index_endpoint_service.ListIndexEndpointsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + index_endpoint_service.ListIndexEndpointsResponse() + ) + + await client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_index_endpoints_flattened(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint_service.ListIndexEndpointsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_index_endpoints(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_index_endpoints_flattened_error(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_index_endpoints(
+            index_endpoint_service.ListIndexEndpointsRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_index_endpoints_flattened_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_index_endpoints), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = index_endpoint_service.ListIndexEndpointsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            index_endpoint_service.ListIndexEndpointsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_index_endpoints(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_index_endpoints_flattened_error_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_index_endpoints(
+            index_endpoint_service.ListIndexEndpointsRequest(), parent="parent_value",
+        )
+
+
+def test_list_index_endpoints_pager():
+    client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_index_endpoints), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token="abc", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], next_page_token="def", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[index_endpoint.IndexEndpoint(),], + next_page_token="ghi", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_index_endpoints(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, index_endpoint.IndexEndpoint) for i in results) + + +def test_list_index_endpoints_pages(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token="abc", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], next_page_token="def", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[index_endpoint.IndexEndpoint(),], + next_page_token="ghi", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + pages = list(client.list_index_endpoints(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_index_endpoints_async_pager(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token="abc", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], next_page_token="def", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[index_endpoint.IndexEndpoint(),], + next_page_token="ghi", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_index_endpoints(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, index_endpoint.IndexEndpoint) for i in responses) + + +@pytest.mark.asyncio +async def test_list_index_endpoints_async_pages(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token="abc", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], next_page_token="def", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[index_endpoint.IndexEndpoint(),], + next_page_token="ghi", + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_index_endpoints(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_index_endpoint( + transport: str = "grpc", + request_type=index_endpoint_service.UpdateIndexEndpointRequest, +): + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_index_endpoint.IndexEndpoint( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + network="network_value", + ) + + response = client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gca_index_endpoint.IndexEndpoint) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + assert response.network == "network_value" + + +def test_update_index_endpoint_from_dict(): + test_update_index_endpoint(request_type=dict) + + +def test_update_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), "__call__" + ) as call: + client.update_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + + +@pytest.mark.asyncio +async def test_update_index_endpoint_async( + transport: str = "grpc_asyncio", + request_type=index_endpoint_service.UpdateIndexEndpointRequest, +): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_index_endpoint.IndexEndpoint( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + network="network_value", + ) + ) + + response = await client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_index_endpoint.IndexEndpoint) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + assert response.network == "network_value" + + +@pytest.mark.asyncio +async def test_update_index_endpoint_async_from_dict(): + await test_update_index_endpoint_async(request_type=dict) + + +def test_update_index_endpoint_field_headers(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UpdateIndexEndpointRequest() + request.index_endpoint.name = "index_endpoint.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), "__call__" + ) as call: + call.return_value = gca_index_endpoint.IndexEndpoint() + + client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "index_endpoint.name=index_endpoint.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UpdateIndexEndpointRequest() + request.index_endpoint.name = "index_endpoint.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_index_endpoint.IndexEndpoint() + ) + + await client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "index_endpoint.name=index_endpoint.name/value", + ) in kw["metadata"] + + +def test_update_index_endpoint_flattened(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_index_endpoint.IndexEndpoint() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_index_endpoint( + index_endpoint=gca_index_endpoint.IndexEndpoint(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint( + name="name_value" + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_index_endpoint( + index_endpoint_service.UpdateIndexEndpointRequest(), + index_endpoint=gca_index_endpoint.IndexEndpoint(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_index_endpoint.IndexEndpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_index_endpoint.IndexEndpoint() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_index_endpoint( + index_endpoint=gca_index_endpoint.IndexEndpoint(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint( + name="name_value" + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_index_endpoint( + index_endpoint_service.UpdateIndexEndpointRequest(), + index_endpoint=gca_index_endpoint.IndexEndpoint(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_index_endpoint( + transport: str = "grpc", + request_type=index_endpoint_service.DeleteIndexEndpointRequest, +): + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_index_endpoint_from_dict(): + test_delete_index_endpoint(request_type=dict) + + +def test_delete_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), "__call__" + ) as call: + client.delete_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_async( + transport: str = "grpc_asyncio", + request_type=index_endpoint_service.DeleteIndexEndpointRequest, +): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_async_from_dict(): + await test_delete_index_endpoint_async(request_type=dict) + + +def test_delete_index_endpoint_field_headers(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeleteIndexEndpointRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeleteIndexEndpointRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_index_endpoint_flattened(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_index_endpoint(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_index_endpoint( + index_endpoint_service.DeleteIndexEndpointRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_index_endpoint(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_index_endpoint( + index_endpoint_service.DeleteIndexEndpointRequest(), name="name_value", + ) + + +def test_deploy_index( + transport: str = "grpc", request_type=index_endpoint_service.DeployIndexRequest +): + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.deploy_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.DeployIndexRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_deploy_index_from_dict(): + test_deploy_index(request_type=dict) + + +def test_deploy_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.deploy_index), "__call__") as call: + client.deploy_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.DeployIndexRequest() + + +@pytest.mark.asyncio +async def test_deploy_index_async( + transport: str = "grpc_asyncio", + request_type=index_endpoint_service.DeployIndexRequest, +): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.deploy_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.DeployIndexRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_deploy_index_async_from_dict(): + await test_deploy_index_async(request_type=dict) + + +def test_deploy_index_field_headers(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeployIndexRequest() + request.index_endpoint = "index_endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.deploy_index), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index_endpoint=index_endpoint/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_deploy_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeployIndexRequest() + request.index_endpoint = "index_endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.deploy_index), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index_endpoint=index_endpoint/value",) in kw[ + "metadata" + ] + + +def test_deploy_index_flattened(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.deploy_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.deploy_index( + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].index_endpoint == "index_endpoint_value" + + assert args[0].deployed_index == gca_index_endpoint.DeployedIndex(id="id_value") + + +def test_deploy_index_flattened_error(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.deploy_index( + index_endpoint_service.DeployIndexRequest(), + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_deploy_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.deploy_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.deploy_index( + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].index_endpoint == "index_endpoint_value" + + assert args[0].deployed_index == gca_index_endpoint.DeployedIndex(id="id_value") + + +@pytest.mark.asyncio +async def test_deploy_index_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.deploy_index( + index_endpoint_service.DeployIndexRequest(), + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + +def test_undeploy_index( + transport: str = "grpc", request_type=index_endpoint_service.UndeployIndexRequest +): + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.undeploy_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.UndeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_index_from_dict(): + test_undeploy_index(request_type=dict) + + +def test_undeploy_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undeploy_index), "__call__") as call: + client.undeploy_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.UndeployIndexRequest() + + +@pytest.mark.asyncio +async def test_undeploy_index_async( + transport: str = "grpc_asyncio", + request_type=index_endpoint_service.UndeployIndexRequest, +): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undeploy_index), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_endpoint_service.UndeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undeploy_index_async_from_dict(): + await test_undeploy_index_async(request_type=dict) + + +def test_undeploy_index_field_headers(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UndeployIndexRequest() + request.index_endpoint = "index_endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undeploy_index), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index_endpoint=index_endpoint/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_undeploy_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_endpoint_service.UndeployIndexRequest() + request.index_endpoint = "index_endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undeploy_index), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index_endpoint=index_endpoint/value",) in kw[ + "metadata" + ] + + +def test_undeploy_index_flattened(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undeploy_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_index( + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].index_endpoint == "index_endpoint_value" + + assert args[0].deployed_index_id == "deployed_index_id_value" + + +def test_undeploy_index_flattened_error(): + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.undeploy_index( + index_endpoint_service.UndeployIndexRequest(), + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + +@pytest.mark.asyncio +async def test_undeploy_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undeploy_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.undeploy_index( + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].index_endpoint == "index_endpoint_value" + + assert args[0].deployed_index_id == "deployed_index_id_value" + + +@pytest.mark.asyncio +async def test_undeploy_index_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.undeploy_index( + index_endpoint_service.UndeployIndexRequest(), + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.IndexEndpointServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexEndpointServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.IndexEndpointServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexEndpointServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.IndexEndpointServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexEndpointServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexEndpointServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = IndexEndpointServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexEndpointServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.IndexEndpointServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.IndexEndpointServiceGrpcTransport, + transports.IndexEndpointServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = IndexEndpointServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.IndexEndpointServiceGrpcTransport,) + + +def test_index_endpoint_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.IndexEndpointServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_index_endpoint_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.IndexEndpointServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_index_endpoint", + "get_index_endpoint", + "list_index_endpoints", + "update_index_endpoint", + "delete_index_endpoint", + "deploy_index", + "undeploy_index", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_index_endpoint_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.IndexEndpointServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_index_endpoint_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.IndexEndpointServiceTransport() + adc.assert_called_once() + + +def test_index_endpoint_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + IndexEndpointServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_index_endpoint_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.IndexEndpointServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.IndexEndpointServiceGrpcTransport, + transports.IndexEndpointServiceGrpcAsyncIOTransport, + ], +) +def test_index_endpoint_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback,
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert, private_key=expected_key
+            )
+
+
+def test_index_endpoint_service_host_no_port():
+    client = IndexEndpointServiceClient(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="aiplatform.googleapis.com"
+        ),
+    )
+    assert client.transport._host == "aiplatform.googleapis.com:443"
+
+
+def test_index_endpoint_service_host_with_port():
+    client = IndexEndpointServiceClient(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="aiplatform.googleapis.com:8000"
+        ),
+    )
+    assert client.transport._host == "aiplatform.googleapis.com:8000"
+
+
+def test_index_endpoint_service_grpc_transport_channel():
+    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_index_endpoint_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexEndpointServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.IndexEndpointServiceGrpcTransport,
+        transports.IndexEndpointServiceGrpcAsyncIOTransport,
+    ],
+)
+def test_index_endpoint_service_transport_channel_mtls_with_client_cert_source(
+    transport_class,
+):
+    with mock.patch(
+        "grpc.ssl_channel_credentials", autospec=True
+    ) as grpc_ssl_channel_cred:
+        with mock.patch.object(
+            transport_class, "create_channel"
+        ) as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(auth, "default") as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=("https://www.googleapis.com/auth/cloud-platform",),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.IndexEndpointServiceGrpcTransport,
+        transports.IndexEndpointServiceGrpcAsyncIOTransport,
+    ],
+)
+def test_index_endpoint_service_transport_channel_mtls_with_adc(transport_class):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(
+            transport_class, "create_channel"
+        ) as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=("https://www.googleapis.com/auth/cloud-platform",),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_index_endpoint_service_grpc_lro_client():
+    client = IndexEndpointServiceClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_index_endpoint_service_grpc_lro_async_client():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_index_path():
+    project = "squid"
+    location = "clam"
+    index = "whelk"
+
+    expected = "projects/{project}/locations/{location}/indexes/{index}".format(
+        project=project, location=location, index=index,
+    )
+    actual = IndexEndpointServiceClient.index_path(project, location, index)
+    assert expected == actual
+
+
+def test_parse_index_path():
+    expected = {
+        "project": "octopus",
+        "location": "oyster",
+        "index": "nudibranch",
+    }
+    path = IndexEndpointServiceClient.index_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_index_path(path)
+    assert expected == actual
+
+
+def test_index_endpoint_path():
+    project = "cuttlefish"
+    location = "mussel"
+    index_endpoint = "winkle"
+
+    expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(
+        project=project, location=location, index_endpoint=index_endpoint,
+    )
+    actual = IndexEndpointServiceClient.index_endpoint_path(
+        project, location, index_endpoint
+    )
+    assert expected == actual
+
+
+def test_parse_index_endpoint_path():
+    expected = {
+        "project": "nautilus",
+        "location": "scallop",
+        "index_endpoint": "abalone",
+    }
+    path = IndexEndpointServiceClient.index_endpoint_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = IndexEndpointServiceClient.parse_index_endpoint_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "squid" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = IndexEndpointServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = IndexEndpointServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = IndexEndpointServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + + expected = "folders/{folder}".format(folder=folder,) + actual = IndexEndpointServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = IndexEndpointServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = IndexEndpointServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + + expected = "organizations/{organization}".format(organization=organization,) + actual = IndexEndpointServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = IndexEndpointServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexEndpointServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + + expected = "projects/{project}".format(project=project,) + actual = IndexEndpointServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = IndexEndpointServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = IndexEndpointServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = IndexEndpointServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = IndexEndpointServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+    actual = IndexEndpointServiceClient.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_with_default_client_info():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(
+        transports.IndexEndpointServiceTransport, "_prep_wrapped_messages"
+    ) as prep:
+        client = IndexEndpointServiceClient(
+            credentials=credentials.AnonymousCredentials(), client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(
+        transports.IndexEndpointServiceTransport, "_prep_wrapped_messages"
+    ) as prep:
+        transport_class = IndexEndpointServiceClient.get_transport_class()
+        transport = transport_class(
+            credentials=credentials.AnonymousCredentials(), client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py
new file mode 100644
index 0000000000..5d9586883e
--- /dev/null
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py
@@ -0,0 +1,2170 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.index_service import ( + IndexServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceClient +from google.cloud.aiplatform_v1beta1.services.index_service import pagers +from google.cloud.aiplatform_v1beta1.services.index_service import transports +from google.cloud.aiplatform_v1beta1.types import deployed_index_ref +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index as gca_index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert IndexServiceClient._get_default_mtls_endpoint(None) is None + assert ( + IndexServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + ) + assert ( + IndexServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + IndexServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + IndexServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert IndexServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [IndexServiceClient, IndexServiceAsyncClient,]) +def test_index_service_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +@pytest.mark.parametrize("client_class", [IndexServiceClient, IndexServiceAsyncClient,]) +def test_index_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = 
client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_index_service_client_get_transport_class(): + transport = IndexServiceClient.get_transport_class() + available_transports = [ + transports.IndexServiceGrpcTransport, + ] + assert transport in available_transports + + transport = IndexServiceClient.get_transport_class("grpc") + assert transport == transports.IndexServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), + ( + IndexServiceAsyncClient, + transports.IndexServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient) +) +@mock.patch.object( + IndexServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(IndexServiceAsyncClient), +) +def test_index_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(IndexServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(IndexServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "true"), + ( + IndexServiceAsyncClient, + transports.IndexServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "false"), + ( + IndexServiceAsyncClient, + transports.IndexServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient) +) +@mock.patch.object( + IndexServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(IndexServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_index_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), + ( + IndexServiceAsyncClient, + transports.IndexServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_index_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), + ( + IndexServiceAsyncClient, + transports.IndexServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_index_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_index_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = IndexServiceClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_index( + transport: str = "grpc", request_type=index_service.CreateIndexRequest +): + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.CreateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_index_from_dict(): + test_create_index(request_type=dict) + + +def test_create_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_index), "__call__") as call: + client.create_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.CreateIndexRequest() + + +@pytest.mark.asyncio +async def test_create_index_async( + transport: str = "grpc_asyncio", request_type=index_service.CreateIndexRequest +): + client = IndexServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.CreateIndexRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_index_async_from_dict(): + await test_create_index_async(request_type=dict) + + +def test_create_index_field_headers(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.CreateIndexRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_index), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_index_field_headers_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.CreateIndexRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_index), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_index_flattened(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_index( + parent="parent_value", index=gca_index.Index(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].index == gca_index.Index(name="name_value") + + +def test_create_index_flattened_error(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_index( + index_service.CreateIndexRequest(), + parent="parent_value", + index=gca_index.Index(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_index_flattened_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_index), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_index( + parent="parent_value", index=gca_index.Index(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].index == gca_index.Index(name="name_value") + + +@pytest.mark.asyncio +async def test_create_index_flattened_error_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_index( + index_service.CreateIndexRequest(), + parent="parent_value", + index=gca_index.Index(name="name_value"), + ) + + +def test_get_index(transport: str = "grpc", request_type=index_service.GetIndexRequest): + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_index), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = index.Index( + name="name_value", + display_name="display_name_value", + description="description_value", + metadata_schema_uri="metadata_schema_uri_value", + etag="etag_value", + ) + + response = client.get_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.GetIndexRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, index.Index) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.metadata_schema_uri == "metadata_schema_uri_value" + + assert response.etag == "etag_value" + + +def test_get_index_from_dict(): + test_get_index(request_type=dict) + + +def test_get_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_index), "__call__") as call: + client.get_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.GetIndexRequest() + + +@pytest.mark.asyncio +async def test_get_index_async( + transport: str = "grpc_asyncio", request_type=index_service.GetIndexRequest +): + client = IndexServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + index.Index( + name="name_value", + display_name="display_name_value", + description="description_value", + metadata_schema_uri="metadata_schema_uri_value", + etag="etag_value", + ) + ) + + response = await client.get_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.GetIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, index.Index) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.metadata_schema_uri == "metadata_schema_uri_value" + + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_get_index_async_from_dict(): + await test_get_index_async(request_type=dict) + + +def test_get_index_field_headers(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.GetIndexRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_index), "__call__") as call: + call.return_value = index.Index() + + client.get_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_index_field_headers_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.GetIndexRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_index), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index()) + + await client.get_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_index_flattened(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = index.Index() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_index(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_index_flattened_error(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_index( + index_service.GetIndexRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_index_flattened_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = index.Index() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_index(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_index_flattened_error_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_index( + index_service.GetIndexRequest(), name="name_value", + ) + + +def test_list_indexes( + transport: str = "grpc", request_type=index_service.ListIndexesRequest +): + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_indexes), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = index_service.ListIndexesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.ListIndexesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListIndexesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_indexes_from_dict(): + test_list_indexes(request_type=dict) + + +def test_list_indexes_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_indexes), "__call__") as call: + client.list_indexes() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.ListIndexesRequest() + + +@pytest.mark.asyncio +async def test_list_indexes_async( + transport: str = "grpc_asyncio", request_type=index_service.ListIndexesRequest +): + client = IndexServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_indexes), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + index_service.ListIndexesResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.ListIndexesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListIndexesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_indexes_async_from_dict(): + await test_list_indexes_async(request_type=dict) + + +def test_list_indexes_field_headers(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.ListIndexesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_indexes), "__call__") as call: + call.return_value = index_service.ListIndexesResponse() + + client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_indexes_field_headers_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_service.ListIndexesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_indexes), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + index_service.ListIndexesResponse() + ) + + await client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_indexes_flattened(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_indexes), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = index_service.ListIndexesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_indexes(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_indexes_flattened_error(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_indexes( + index_service.ListIndexesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_indexes_flattened_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_indexes), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = index_service.ListIndexesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + index_service.ListIndexesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_indexes(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_indexes_flattened_error_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_indexes( + index_service.ListIndexesRequest(), parent="parent_value", + ) + + +def test_list_indexes_pager(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_indexes), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[index.Index(), index.Index(), index.Index(),], + next_page_token="abc", + ), + index_service.ListIndexesResponse(indexes=[], next_page_token="def",), + index_service.ListIndexesResponse( + indexes=[index.Index(),], next_page_token="ghi", + ), + index_service.ListIndexesResponse(indexes=[index.Index(), index.Index(),],), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_indexes(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, index.Index) for i in results) + + +def test_list_indexes_pages(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_indexes), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[index.Index(), index.Index(), index.Index(),], + next_page_token="abc", + ), + index_service.ListIndexesResponse(indexes=[], next_page_token="def",), + index_service.ListIndexesResponse( + indexes=[index.Index(),], next_page_token="ghi", + ), + index_service.ListIndexesResponse(indexes=[index.Index(), index.Index(),],), + RuntimeError, + ) + pages = list(client.list_indexes(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_indexes_async_pager(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_indexes), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[index.Index(), index.Index(), index.Index(),], + next_page_token="abc", + ), + index_service.ListIndexesResponse(indexes=[], next_page_token="def",), + index_service.ListIndexesResponse( + indexes=[index.Index(),], next_page_token="ghi", + ), + index_service.ListIndexesResponse(indexes=[index.Index(), index.Index(),],), + RuntimeError, + ) + async_pager = await client.list_indexes(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, index.Index) for i in responses) + + +@pytest.mark.asyncio +async def test_list_indexes_async_pages(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[index.Index(), index.Index(), index.Index(),], + next_page_token="abc", + ), + index_service.ListIndexesResponse(indexes=[], next_page_token="def",), + index_service.ListIndexesResponse( + indexes=[index.Index(),], next_page_token="ghi", + ), + index_service.ListIndexesResponse(indexes=[index.Index(), index.Index(),],), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_indexes(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_index( + transport: str = "grpc", request_type=index_service.UpdateIndexRequest +): + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.UpdateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_index_from_dict(): + test_update_index(request_type=dict) + + +def test_update_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_index), "__call__") as call: + client.update_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.UpdateIndexRequest() + + +@pytest.mark.asyncio +async def test_update_index_async( + transport: str = "grpc_asyncio", request_type=index_service.UpdateIndexRequest +): + client = IndexServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.UpdateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_index_async_from_dict(): + await test_update_index_async(request_type=dict) + + +def test_update_index_field_headers(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_service.UpdateIndexRequest() + request.index.name = "index.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_index), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index.name=index.name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_index_field_headers_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.UpdateIndexRequest() + request.index.name = "index.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_index), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index.name=index.name/value",) in kw["metadata"] + + +def test_update_index_flattened(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_index), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_index( + index=gca_index.Index(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].index == gca_index.Index(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_index_flattened_error(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_index( + index_service.UpdateIndexRequest(), + index=gca_index.Index(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_index_flattened_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_index( + index=gca_index.Index(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].index == gca_index.Index(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_index_flattened_error_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_index( + index_service.UpdateIndexRequest(), + index=gca_index.Index(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_index( + transport: str = "grpc", request_type=index_service.DeleteIndexRequest +): + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.DeleteIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_index_from_dict(): + test_delete_index(request_type=dict) + + +def test_delete_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_index), "__call__") as call: + client.delete_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.DeleteIndexRequest() + + +@pytest.mark.asyncio +async def test_delete_index_async( + transport: str = "grpc_asyncio", request_type=index_service.DeleteIndexRequest +): + client = IndexServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == index_service.DeleteIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_index_async_from_dict(): + await test_delete_index_async(request_type=dict) + + +def test_delete_index_field_headers(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_service.DeleteIndexRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_index), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_index_field_headers_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.DeleteIndexRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_index), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_index_flattened(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_index), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_index(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_index_flattened_error(): + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_index( + index_service.DeleteIndexRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_index_flattened_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_index), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_index(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_index_flattened_error_async(): + client = IndexServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_index( + index_service.DeleteIndexRequest(), name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = IndexServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.IndexServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.IndexServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.IndexServiceGrpcTransport, + transports.IndexServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = IndexServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.IndexServiceGrpcTransport,) + + +def test_index_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.IndexServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_index_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.IndexServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_index", + "get_index", + "list_indexes", + "update_index", + "delete_index", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_index_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.IndexServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_index_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.IndexServiceTransport() + adc.assert_called_once() + + +def test_index_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + IndexServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_index_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.IndexServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport], +) +def test_index_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_index_service_host_no_port(): + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_index_service_host_with_port(): + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:8000" + + +def test_index_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.IndexServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_index_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.IndexServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport], +) +def test_index_service_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, 
client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport], +) +def test_index_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_index_service_grpc_lro_client(): + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_index_service_grpc_lro_async_client(): + client = IndexServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_index_path(): + project = "squid" + location = "clam" + index = "whelk" + + expected = "projects/{project}/locations/{location}/indexes/{index}".format( + project=project, location=location, index=index, + ) + actual = IndexServiceClient.index_path(project, location, index) + assert expected == actual + + +def test_parse_index_path(): + expected = { + "project": "octopus", + "location": "oyster", + "index": "nudibranch", + } + path = IndexServiceClient.index_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_index_path(path) + assert expected == actual + + +def test_index_endpoint_path(): + project = "cuttlefish" + location = "mussel" + index_endpoint = "winkle" + + expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format( + project=project, location=location, index_endpoint=index_endpoint, + ) + actual = IndexServiceClient.index_endpoint_path(project, location, index_endpoint) + assert expected == actual + + +def test_parse_index_endpoint_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "index_endpoint": "abalone", + } + path = IndexServiceClient.index_endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexServiceClient.parse_index_endpoint_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "squid" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = IndexServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = IndexServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + + expected = "folders/{folder}".format(folder=folder,) + actual = IndexServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = IndexServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + + expected = "organizations/{organization}".format(organization=organization,) + actual = IndexServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = IndexServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + + expected = "projects/{project}".format(project=project,) + actual = IndexServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = IndexServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = IndexServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = IndexServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.IndexServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = IndexServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.IndexServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = IndexServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py index e230d9f4b8..6acb3e7b86 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -63,6 +63,11 @@ from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import machine_resources from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_deployment_monitoring_job as gca_model_deployment_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import model_monitoring from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import study from google.longrunning import operations_pb2 @@ -117,9 +122,7 @@ def test__get_default_mtls_endpoint(): assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi -@pytest.mark.parametrize( - "client_class", [JobServiceClient, JobServiceAsyncClient,], -) 
+@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient,]) def test_job_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( @@ -134,9 +137,7 @@ def test_job_service_client_from_service_account_info(client_class): assert client.transport._host == "aiplatform.googleapis.com:443" -@pytest.mark.parametrize( - "client_class", [JobServiceClient, JobServiceAsyncClient,], -) +@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient,]) def test_job_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( @@ -5849,229 +5850,2629 @@ async def test_cancel_batch_prediction_job_flattened_error_async(): ) -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=credentials.AnonymousCredentials(), +def test_create_model_deployment_monitoring_job( + transport: str = "grpc", + request_type=job_service.CreateModelDeploymentMonitoringJobRequest, +): + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, ) - with pytest.raises(ValueError): - client = JobServiceClient( - credentials=credentials.AnonymousCredentials(), transport=transport, - ) - # It is an error to provide a credentials file and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # It is an error to provide scopes and a transport instance. 
- transport = transports.JobServiceGrpcTransport( - credentials=credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - client_options={"scopes": ["1", "2"]}, transport=transport, + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value", + display_name="display_name_value", + endpoint="endpoint_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri="predict_instance_schema_uri_value", + analysis_instance_schema_uri="analysis_instance_schema_uri_value", ) + response = client.create_model_deployment_monitoring_job(request) -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=credentials.AnonymousCredentials(), - ) - client = JobServiceClient(transport=transport) - assert client.transport is transport + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel + # Establish that the response is the type that we expect. 
- transport = transports.JobServiceGrpcAsyncIOTransport( - credentials=credentials.AnonymousCredentials(), + assert isinstance( + response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob ) - channel = transport.grpc_channel - assert channel + assert response.name == "name_value" -@pytest.mark.parametrize( - "transport_class", - [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport,], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + assert response.display_name == "display_name_value" + assert response.endpoint == "endpoint_value" -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) - assert isinstance(client.transport, transports.JobServiceGrpcTransport,) + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert ( + response.schedule_state + == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + ) -def test_job_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(exceptions.DuplicateCredentialArgs): - transport = transports.JobServiceTransport( - credentials=credentials.AnonymousCredentials(), - credentials_file="credentials.json", - ) + assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" + assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" -def test_job_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch( - "google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport.__init__" - ) as Transport: - Transport.return_value = None - transport = transports.JobServiceTransport( - credentials=credentials.AnonymousCredentials(), - ) - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - "create_custom_job", - "get_custom_job", - "list_custom_jobs", - "delete_custom_job", - "cancel_custom_job", - "create_data_labeling_job", - "get_data_labeling_job", - "list_data_labeling_jobs", - "delete_data_labeling_job", - "cancel_data_labeling_job", - "create_hyperparameter_tuning_job", - "get_hyperparameter_tuning_job", - "list_hyperparameter_tuning_jobs", - "delete_hyperparameter_tuning_job", - "cancel_hyperparameter_tuning_job", - "create_batch_prediction_job", - "get_batch_prediction_job", - "list_batch_prediction_jobs", - "delete_batch_prediction_job", - "cancel_batch_prediction_job", +def test_create_model_deployment_monitoring_job_from_dict(): + test_create_model_deployment_monitoring_job(request_type=dict) + + +def test_create_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), "__call__" + ) as call: + client.create_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() -def test_job_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_async( + transport: str = "grpc_asyncio", + request_type=job_service.CreateModelDeploymentMonitoringJobRequest, +): + client = JobServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - auth, "load_credentials_from_file" - ) as load_creds, mock.patch( - "google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages" - ) as Transport: - Transport.return_value = None - load_creds.return_value = (credentials.AnonymousCredentials(), None) - transport = transports.JobServiceTransport( - credentials_file="credentials.json", quota_project_id="octopus", - ) - load_creds.assert_called_once_with( - "credentials.json", - scopes=("https://www.googleapis.com/auth/cloud-platform",), - quota_project_id="octopus", + type(client.transport.create_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value", + display_name="display_name_value", + endpoint="endpoint_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri="predict_instance_schema_uri_value", + analysis_instance_schema_uri="analysis_instance_schema_uri_value", + ) ) + response = await client.create_model_deployment_monitoring_job(request) -def test_job_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(auth, "default") as adc, mock.patch( - "google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages" - ) as Transport: - Transport.return_value = None - adc.return_value = (credentials.AnonymousCredentials(), None) - transport = transports.JobServiceTransport() - adc.assert_called_once() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() -def test_job_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) - JobServiceClient() - adc.assert_called_once_with( - scopes=("https://www.googleapis.com/auth/cloud-platform",), - quota_project_id=None, - ) + # Establish that the response is the type that we expect. + assert isinstance( + response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob + ) + assert response.name == "name_value" -def test_job_service_transport_auth_adc(): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) - transports.JobServiceGrpcTransport( - host="squid.clam.whelk", quota_project_id="octopus" - ) - adc.assert_called_once_with( - scopes=("https://www.googleapis.com/auth/cloud-platform",), - quota_project_id="octopus", - ) + assert response.display_name == "display_name_value" + assert response.endpoint == "endpoint_value" -@pytest.mark.parametrize( - "transport_class", - [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport], -) -def test_job_service_grpc_transport_client_cert_source_for_mtls(transport_class): - cred = credentials.AnonymousCredentials() + assert response.state == job_state.JobState.JOB_STATE_QUEUED - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds, - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) + assert ( + response.schedule_state + == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + ) - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback, - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, private_key=expected_key - ) + assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" + assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" -def test_job_service_host_no_port(): + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_async_from_dict(): + await test_create_model_deployment_monitoring_job_async(request_type=dict) + + +def test_create_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateModelDeploymentMonitoringJobRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = ( + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + ) + + client.create_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateModelDeploymentMonitoringJobRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + ) + + await client.create_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_model_deployment_monitoring_job_flattened(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + ) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_model_deployment_monitoring_job( + parent="parent_value", + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[ + 0 + ].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ) + + +def test_create_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model_deployment_monitoring_job( + job_service.CreateModelDeploymentMonitoringJobRequest(), + parent="parent_value", + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ), + ) + + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_model_deployment_monitoring_job( + parent="parent_value", + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[ + 0 + ].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ) + + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_model_deployment_monitoring_job( + job_service.CreateModelDeploymentMonitoringJobRequest(), + parent="parent_value", + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ), + ) + + +def test_search_model_deployment_monitoring_stats_anomalies( + transport: str = "grpc", + request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, +): + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + next_page_token="next_page_token_value", + ) + + response = client.search_model_deployment_monitoring_stats_anomalies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert ( + args[0] + == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + ) + + # Establish that the response is the type that we expect. + + assert isinstance( + response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager + ) + + assert response.next_page_token == "next_page_token_value" + + +def test_search_model_deployment_monitoring_stats_anomalies_from_dict(): + test_search_model_deployment_monitoring_stats_anomalies(request_type=dict) + + +def test_search_model_deployment_monitoring_stats_anomalies_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + ) as call: + client.search_model_deployment_monitoring_stats_anomalies() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert ( + args[0] + == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + ) + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_async( + transport: str = "grpc_asyncio", + request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, +): + client = JobServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.search_model_deployment_monitoring_stats_anomalies( + request + ) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert ( + args[0] + == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + ) + + # Establish that the response is the type that we expect. 
+ assert isinstance( + response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager + ) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_async_from_dict(): + await test_search_model_deployment_monitoring_stats_anomalies_async( + request_type=dict + ) + + +def test_search_model_deployment_monitoring_stats_anomalies_field_headers(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + request.model_deployment_monitoring_job = "model_deployment_monitoring_job/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + ) as call: + call.return_value = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() + ) + + client.search_model_deployment_monitoring_stats_anomalies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model_deployment_monitoring_job=model_deployment_monitoring_job/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_field_headers_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + request.model_deployment_monitoring_job = "model_deployment_monitoring_job/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() + ) + + await client.search_model_deployment_monitoring_stats_anomalies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model_deployment_monitoring_job=model_deployment_monitoring_job/value", + ) in kw["metadata"] + + +def test_search_model_deployment_monitoring_stats_anomalies_flattened(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() + ) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_model_deployment_monitoring_stats_anomalies( + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert ( + args[0].model_deployment_monitoring_job + == "model_deployment_monitoring_job_value" + ) + + assert args[0].deployed_model_id == "deployed_model_id_value" + + +def test_search_model_deployment_monitoring_stats_anomalies_flattened_error(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.search_model_deployment_monitoring_stats_anomalies( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(), + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_flattened_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.search_model_deployment_monitoring_stats_anomalies( + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert ( + args[0].model_deployment_monitoring_job + == "model_deployment_monitoring_job_value" + ) + + assert args[0].deployed_model_id == "deployed_model_id_value" + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_flattened_error_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.search_model_deployment_monitoring_stats_anomalies( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(), + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + +def test_search_model_deployment_monitoring_stats_anomalies_pager(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token="abc", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[], next_page_token="def", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token="ghi", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("model_deployment_monitoring_job", ""),) + ), + ) + pager = client.search_model_deployment_monitoring_stats_anomalies(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all( + isinstance( + i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies + ) + for i in results + ) + + +def test_search_model_deployment_monitoring_stats_anomalies_pages(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token="abc", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[], next_page_token="def", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token="ghi", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + ), + RuntimeError, + ) + pages = list( + client.search_model_deployment_monitoring_stats_anomalies(request={}).pages + ) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_async_pager(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token="abc", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[], next_page_token="def", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token="ghi", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + ), + RuntimeError, + ) + async_pager = await client.search_model_deployment_monitoring_stats_anomalies( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance( + i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies + ) + for i in responses + ) + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_async_pages(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token="abc", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[], next_page_token="def", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token="ghi", + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in ( + await client.search_model_deployment_monitoring_stats_anomalies(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_model_deployment_monitoring_job( + transport: str = "grpc", + request_type=job_service.GetModelDeploymentMonitoringJobRequest, +): + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value", + display_name="display_name_value", + endpoint="endpoint_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri="predict_instance_schema_uri_value", + analysis_instance_schema_uri="analysis_instance_schema_uri_value", + ) + + response = client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + + assert isinstance( + response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob + ) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.endpoint == "endpoint_value" + + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + assert ( + response.schedule_state + == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + ) + + assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" + + assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" + + +def test_get_model_deployment_monitoring_job_from_dict(): + test_get_model_deployment_monitoring_job(request_type=dict) + + +def test_get_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), "__call__" + ) as call: + client.get_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_async( + transport: str = "grpc_asyncio", + request_type=job_service.GetModelDeploymentMonitoringJobRequest, +): + client = JobServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value", + display_name="display_name_value", + endpoint="endpoint_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri="predict_instance_schema_uri_value", + analysis_instance_schema_uri="analysis_instance_schema_uri_value", + ) + ) + + response = await client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance( + response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob + ) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.endpoint == "endpoint_value" + + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + assert ( + response.schedule_state + == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + ) + + assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value" + + assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value" + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_async_from_dict(): + await test_get_model_deployment_monitoring_job_async(request_type=dict) + + +def test_get_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetModelDeploymentMonitoringJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = ( + model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + ) + + client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetModelDeploymentMonitoringJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + ) + + await client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_model_deployment_monitoring_job_flattened(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + ) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_deployment_monitoring_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_deployment_monitoring_job( + job_service.GetModelDeploymentMonitoringJobRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_deployment_monitoring_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_model_deployment_monitoring_job( + job_service.GetModelDeploymentMonitoringJobRequest(), name="name_value", + ) + + +def test_list_model_deployment_monitoring_jobs( + transport: str = "grpc", + request_type=job_service.ListModelDeploymentMonitoringJobsRequest, +): + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_model_deployment_monitoring_jobs_from_dict(): + test_list_model_deployment_monitoring_jobs(request_type=dict) + + +def test_list_model_deployment_monitoring_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), "__call__" + ) as call: + client.list_model_deployment_monitoring_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_async( + transport: str = "grpc_asyncio", + request_type=job_service.ListModelDeploymentMonitoringJobsRequest, +): + client = JobServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + job_service.ListModelDeploymentMonitoringJobsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_async_from_dict(): + await test_list_model_deployment_monitoring_jobs_async(request_type=dict) + + +def test_list_model_deployment_monitoring_jobs_field_headers(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListModelDeploymentMonitoringJobsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), "__call__" + ) as call: + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() + + client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_field_headers_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListModelDeploymentMonitoringJobsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + job_service.ListModelDeploymentMonitoringJobsResponse() + ) + + await client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_model_deployment_monitoring_jobs_flattened(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_deployment_monitoring_jobs(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_model_deployment_monitoring_jobs_flattened_error(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_model_deployment_monitoring_jobs( + job_service.ListModelDeploymentMonitoringJobsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_flattened_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + job_service.ListModelDeploymentMonitoringJobsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_deployment_monitoring_jobs( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_flattened_error_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_model_deployment_monitoring_jobs( + job_service.ListModelDeploymentMonitoringJobsRequest(), + parent="parent_value", + ) + + +def test_list_model_deployment_monitoring_jobs_pager(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token="abc", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[], next_page_token="def", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token="ghi", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_model_deployment_monitoring_jobs(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all( + isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + for i in results + ) + + +def test_list_model_deployment_monitoring_jobs_pages(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token="abc", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[], next_page_token="def", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token="ghi", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_deployment_monitoring_jobs(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_async_pager(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token="abc", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[], next_page_token="def", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token="ghi", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_deployment_monitoring_jobs(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_async_pages(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token="abc", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[], next_page_token="def", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token="ghi", + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in ( + await client.list_model_deployment_monitoring_jobs(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_model_deployment_monitoring_job( + transport: str = "grpc", + request_type=job_service.UpdateModelDeploymentMonitoringJobRequest, +): + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_model_deployment_monitoring_job_from_dict(): + test_update_model_deployment_monitoring_job(request_type=dict) + + +def test_update_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), "__call__" + ) as call: + client.update_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_async( + transport: str = "grpc_asyncio", + request_type=job_service.UpdateModelDeploymentMonitoringJobRequest, +): + client = JobServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_async_from_dict(): + await test_update_model_deployment_monitoring_job_async(request_type=dict) + + +def test_update_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.UpdateModelDeploymentMonitoringJobRequest() + request.model_deployment_monitoring_job.name = ( + "model_deployment_monitoring_job.name/value" + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.UpdateModelDeploymentMonitoringJobRequest() + request.model_deployment_monitoring_job.name = ( + "model_deployment_monitoring_job.name/value" + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value", + ) in kw["metadata"] + + +def test_update_model_deployment_monitoring_job_flattened(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_model_deployment_monitoring_job( + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[ + 0 + ].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model_deployment_monitoring_job( + job_service.UpdateModelDeploymentMonitoringJobRequest(), + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.update_model_deployment_monitoring_job( + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[ + 0 + ].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_model_deployment_monitoring_job( + job_service.UpdateModelDeploymentMonitoringJobRequest(), + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_model_deployment_monitoring_job( + transport: str = "grpc", + request_type=job_service.DeleteModelDeploymentMonitoringJobRequest, +): + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_deployment_monitoring_job_from_dict(): + test_delete_model_deployment_monitoring_job(request_type=dict) + + +def test_delete_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), "__call__" + ) as call: + client.delete_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_async( + transport: str = "grpc_asyncio", + request_type=job_service.DeleteModelDeploymentMonitoringJobRequest, +): + client = JobServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_async_from_dict(): + await test_delete_model_deployment_monitoring_job_async(request_type=dict) + + +def test_delete_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteModelDeploymentMonitoringJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.DeleteModelDeploymentMonitoringJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_model_deployment_monitoring_job_flattened(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model_deployment_monitoring_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_model_deployment_monitoring_job( + job_service.DeleteModelDeploymentMonitoringJobRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model_deployment_monitoring_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_model_deployment_monitoring_job( + job_service.DeleteModelDeploymentMonitoringJobRequest(), name="name_value", + ) + + +def test_pause_model_deployment_monitoring_job( + transport: str = "grpc", + request_type=job_service.PauseModelDeploymentMonitoringJobRequest, +): + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_pause_model_deployment_monitoring_job_from_dict(): + test_pause_model_deployment_monitoring_job(request_type=dict) + + +def test_pause_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), "__call__" + ) as call: + client.pause_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_async( + transport: str = "grpc_asyncio", + request_type=job_service.PauseModelDeploymentMonitoringJobRequest, +): + client = JobServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_async_from_dict(): + await test_pause_model_deployment_monitoring_job_async(request_type=dict) + + +def test_pause_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.PauseModelDeploymentMonitoringJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = None + + client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.PauseModelDeploymentMonitoringJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_pause_model_deployment_monitoring_job_flattened(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.pause_model_deployment_monitoring_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_pause_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.pause_model_deployment_monitoring_job( + job_service.PauseModelDeploymentMonitoringJobRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.pause_model_deployment_monitoring_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.pause_model_deployment_monitoring_job( + job_service.PauseModelDeploymentMonitoringJobRequest(), name="name_value", + ) + + +def test_resume_model_deployment_monitoring_job( + transport: str = "grpc", + request_type=job_service.ResumeModelDeploymentMonitoringJobRequest, +): + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_resume_model_deployment_monitoring_job_from_dict(): + test_resume_model_deployment_monitoring_job(request_type=dict) + + +def test_resume_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), "__call__" + ) as call: + client.resume_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_async( + transport: str = "grpc_asyncio", + request_type=job_service.ResumeModelDeploymentMonitoringJobRequest, +): + client = JobServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_async_from_dict(): + await test_resume_model_deployment_monitoring_job_async(request_type=dict) + + +def test_resume_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ResumeModelDeploymentMonitoringJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = None + + client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ResumeModelDeploymentMonitoringJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_resume_model_deployment_monitoring_job_flattened(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.resume_model_deployment_monitoring_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_resume_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.resume_model_deployment_monitoring_job( + job_service.ResumeModelDeploymentMonitoringJobRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.resume_model_deployment_monitoring_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.resume_model_deployment_monitoring_job( + job_service.ResumeModelDeploymentMonitoringJobRequest(), name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.JobServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = JobServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.JobServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport,], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = JobServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.JobServiceGrpcTransport,) + + +def test_job_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.JobServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_job_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.JobServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_custom_job", + "get_custom_job", + "list_custom_jobs", + "delete_custom_job", + "cancel_custom_job", + "create_data_labeling_job", + "get_data_labeling_job", + "list_data_labeling_jobs", + "delete_data_labeling_job", + "cancel_data_labeling_job", + "create_hyperparameter_tuning_job", + "get_hyperparameter_tuning_job", + "list_hyperparameter_tuning_jobs", + "delete_hyperparameter_tuning_job", + "cancel_hyperparameter_tuning_job", + "create_batch_prediction_job", + "get_batch_prediction_job", + "list_batch_prediction_jobs", + "delete_batch_prediction_job", + "cancel_batch_prediction_job", + "create_model_deployment_monitoring_job", + "search_model_deployment_monitoring_stats_anomalies", + "get_model_deployment_monitoring_job", + "list_model_deployment_monitoring_jobs", + "update_model_deployment_monitoring_job", + "delete_model_deployment_monitoring_job", + "pause_model_deployment_monitoring_job", + "resume_model_deployment_monitoring_job", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, 
method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_job_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.JobServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_job_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.JobServiceTransport() + adc.assert_called_once() + + +def test_job_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + JobServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_job_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.JobServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport], +) +def test_job_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_job_service_host_no_port(): client = JobServiceClient( credentials=credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( @@ -6337,10 +8738,35 @@ def test_parse_dataset_path(): assert expected == actual -def test_hyperparameter_tuning_job_path(): +def test_endpoint_path(): project = "squid" location = "clam" - hyperparameter_tuning_job = "whelk" + endpoint = "whelk" + + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, location=location, endpoint=endpoint, + ) + actual = JobServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = JobServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_endpoint_path(path) + assert expected == actual + + +def test_hyperparameter_tuning_job_path(): + project = "cuttlefish" + location = "mussel" + hyperparameter_tuning_job = "winkle" expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format( project=project, @@ -6355,9 +8781,9 @@ def test_hyperparameter_tuning_job_path(): def test_parse_hyperparameter_tuning_job_path(): expected = { - "project": "octopus", - "location": "oyster", - "hyperparameter_tuning_job": "nudibranch", + "project": "nautilus", + "location": "scallop", + "hyperparameter_tuning_job": "abalone", } path = JobServiceClient.hyperparameter_tuning_job_path(**expected) @@ -6367,9 +8793,9 @@ def test_parse_hyperparameter_tuning_job_path(): def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" + project = "squid" + location = "clam" + model = "whelk" expected = "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, @@ -6380,9 +8806,9 @@ def test_model_path(): def test_parse_model_path(): expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", + "project": "octopus", + "location": "oyster", + "model": "nudibranch", } path = JobServiceClient.model_path(**expected) @@ -6391,11 +8817,88 @@ def test_parse_model_path(): assert expected == actual -def test_trial_path(): +def test_model_deployment_monitoring_job_path(): + project = "cuttlefish" + location = "mussel" + model_deployment_monitoring_job = "winkle" + + expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format( + project=project, + location=location, + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + actual = JobServiceClient.model_deployment_monitoring_job_path( + project, location, model_deployment_monitoring_job + ) + assert expected == actual + + +def 
test_parse_model_deployment_monitoring_job_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model_deployment_monitoring_job": "abalone", + } + path = JobServiceClient.model_deployment_monitoring_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_model_deployment_monitoring_job_path(path) + assert expected == actual + + +def test_network_path(): project = "squid" - location = "clam" - study = "whelk" - trial = "octopus" + network = "clam" + + expected = "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + actual = JobServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "whelk", + "network": "octopus", + } + path = JobServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_network_path(path) + assert expected == actual + + +def test_tensorboard_path(): + project = "oyster" + location = "nudibranch" + tensorboard = "cuttlefish" + + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format( + project=project, location=location, tensorboard=tensorboard, + ) + actual = JobServiceClient.tensorboard_path(project, location, tensorboard) + assert expected == actual + + +def test_parse_tensorboard_path(): + expected = { + "project": "mussel", + "location": "winkle", + "tensorboard": "nautilus", + } + path = JobServiceClient.tensorboard_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_tensorboard_path(path) + assert expected == actual + + +def test_trial_path(): + project = "scallop" + location = "abalone" + study = "squid" + trial = "clam" expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format( project=project, location=location, study=study, trial=trial, @@ -6406,10 +8909,10 @@ def test_trial_path(): def test_parse_trial_path(): expected = { - "project": "oyster", - "location": "nudibranch", - "study": "cuttlefish", - "trial": "mussel", + "project": "whelk", + "location": "octopus", + "study": "oyster", + "trial": "nudibranch", } path = JobServiceClient.trial_path(**expected) @@ -6419,7 +8922,7 @@ def test_parse_trial_path(): def test_common_billing_account_path(): - billing_account = "winkle" + billing_account = "cuttlefish" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, @@ -6430,7 +8933,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "nautilus", + "billing_account": "mussel", } path = JobServiceClient.common_billing_account_path(**expected) @@ -6440,7 +8943,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "scallop" + folder = "winkle" expected = "folders/{folder}".format(folder=folder,) actual = JobServiceClient.common_folder_path(folder) @@ -6449,7 +8952,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "abalone", + "folder": "nautilus", } path = JobServiceClient.common_folder_path(**expected) @@ -6459,7 +8962,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "squid" + organization = "scallop" expected = "organizations/{organization}".format(organization=organization,) actual = JobServiceClient.common_organization_path(organization) @@ -6468,7 +8971,7 @@ def test_common_organization_path(): def 
test_parse_common_organization_path(): expected = { - "organization": "clam", + "organization": "abalone", } path = JobServiceClient.common_organization_path(**expected) @@ -6478,7 +8981,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "whelk" + project = "squid" expected = "projects/{project}".format(project=project,) actual = JobServiceClient.common_project_path(project) @@ -6487,7 +8990,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "octopus", + "project": "clam", } path = JobServiceClient.common_project_path(**expected) @@ -6497,8 +9000,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "oyster" - location = "nudibranch" + project = "whelk" + location = "octopus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -6509,8 +9012,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "cuttlefish", - "location": "mussel", + "project": "oyster", + "location": "nudibranch", } path = JobServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py new file mode 100644 index 0000000000..45fd76e099 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py @@ -0,0 +1,8144 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.metadata_service import ( + MetadataServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.metadata_service import ( + MetadataServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers +from google.cloud.aiplatform_v1beta1.services.metadata_service import transports +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import 
metadata_store +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert MetadataServiceClient._get_default_mtls_endpoint(None) is None + assert ( + MetadataServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + MetadataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + MetadataServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + MetadataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + MetadataServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [MetadataServiceClient, MetadataServiceAsyncClient,] +) +def 
test_metadata_service_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [MetadataServiceClient, MetadataServiceAsyncClient,] +) +def test_metadata_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_metadata_service_client_get_transport_class(): + transport = MetadataServiceClient.get_transport_class() + available_transports = [ + transports.MetadataServiceGrpcTransport, + ] + assert transport in available_transports + + transport = MetadataServiceClient.get_transport_class("grpc") + assert transport == transports.MetadataServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + ( + MetadataServiceAsyncClient, + transports.MetadataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + MetadataServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetadataServiceClient), 
+) +@mock.patch.object( + MetadataServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetadataServiceAsyncClient), +) +def test_metadata_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(MetadataServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(MetadataServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + MetadataServiceClient, + transports.MetadataServiceGrpcTransport, + "grpc", + "true", + ), + ( + MetadataServiceAsyncClient, + transports.MetadataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + MetadataServiceClient, + transports.MetadataServiceGrpcTransport, + "grpc", + "false", + ), + ( + MetadataServiceAsyncClient, + transports.MetadataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], 
+) +@mock.patch.object( + MetadataServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetadataServiceClient), +) +@mock.patch.object( + MetadataServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetadataServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_metadata_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + ( + MetadataServiceAsyncClient, + transports.MetadataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_metadata_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + ( + MetadataServiceAsyncClient, + transports.MetadataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_metadata_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_metadata_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = MetadataServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_metadata_store( + transport: str = "grpc", request_type=metadata_service.CreateMetadataStoreRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_metadata_store_from_dict(): + test_create_metadata_store(request_type=dict) + + +def test_create_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + client.create_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + +@pytest.mark.asyncio +async def test_create_metadata_store_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.CreateMetadataStoreRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_metadata_store_async_from_dict(): + await test_create_metadata_store_async(request_type=dict) + + +def test_create_metadata_store_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataStoreRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataStoreRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_metadata_store_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metadata_store( + parent="parent_value", + metadata_store=gca_metadata_store.MetadataStore(name="name_value"), + metadata_store_id="metadata_store_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].metadata_store == gca_metadata_store.MetadataStore( + name="name_value" + ) + + assert args[0].metadata_store_id == "metadata_store_id_value" + + +def test_create_metadata_store_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_metadata_store( + metadata_service.CreateMetadataStoreRequest(), + parent="parent_value", + metadata_store=gca_metadata_store.MetadataStore(name="name_value"), + metadata_store_id="metadata_store_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_metadata_store( + parent="parent_value", + metadata_store=gca_metadata_store.MetadataStore(name="name_value"), + metadata_store_id="metadata_store_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].metadata_store == gca_metadata_store.MetadataStore( + name="name_value" + ) + + assert args[0].metadata_store_id == "metadata_store_id_value" + + +@pytest.mark.asyncio +async def test_create_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_metadata_store( + metadata_service.CreateMetadataStoreRequest(), + parent="parent_value", + metadata_store=gca_metadata_store.MetadataStore(name="name_value"), + metadata_store_id="metadata_store_id_value", + ) + + +def test_get_metadata_store( + transport: str = "grpc", request_type=metadata_service.GetMetadataStoreRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore( + name="name_value", description="description_value", + ) + + response = client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetMetadataStoreRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, metadata_store.MetadataStore) + + assert response.name == "name_value" + + assert response.description == "description_value" + + +def test_get_metadata_store_from_dict(): + test_get_metadata_store(request_type=dict) + + +def test_get_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + client.get_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetMetadataStoreRequest() + + +@pytest.mark.asyncio +async def test_get_metadata_store_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.GetMetadataStoreRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_store.MetadataStore( + name="name_value", description="description_value", + ) + ) + + response = await client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_store.MetadataStore) + + assert response.name == "name_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_metadata_store_async_from_dict(): + await test_get_metadata_store_async(request_type=dict) + + +def test_get_metadata_store_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.GetMetadataStoreRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + call.return_value = metadata_store.MetadataStore() + + client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataStoreRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_store.MetadataStore() + ) + + await client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_metadata_store_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_metadata_store(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_metadata_store_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_metadata_store( + metadata_service.GetMetadataStoreRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_store.MetadataStore() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_metadata_store(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_metadata_store( + metadata_service.GetMetadataStoreRequest(), name="name_value", + ) + + +def test_list_metadata_stores( + transport: str = "grpc", request_type=metadata_service.ListMetadataStoresRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataStoresResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListMetadataStoresRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListMetadataStoresPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_metadata_stores_from_dict(): + test_list_metadata_stores(request_type=dict) + + +def test_list_metadata_stores_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + client.list_metadata_stores() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListMetadataStoresRequest() + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.ListMetadataStoresRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataStoresResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListMetadataStoresRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListMetadataStoresAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_from_dict(): + await test_list_metadata_stores_async(request_type=dict) + + +def test_list_metadata_stores_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataStoresRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + call.return_value = metadata_service.ListMetadataStoresResponse() + + client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_metadata_stores_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataStoresRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataStoresResponse() + ) + + await client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_metadata_stores_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataStoresResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_metadata_stores(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_metadata_stores_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_metadata_stores( + metadata_service.ListMetadataStoresRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_metadata_stores_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = metadata_service.ListMetadataStoresResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataStoresResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_metadata_stores(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_metadata_stores_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_metadata_stores( + metadata_service.ListMetadataStoresRequest(), parent="parent_value", + ) + + +def test_list_metadata_stores_pager(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], next_page_token="def", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[metadata_store.MetadataStore(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_metadata_stores(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, metadata_store.MetadataStore) for i in results) + + +def test_list_metadata_stores_pages(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], next_page_token="def", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[metadata_store.MetadataStore(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + pages = list(client.list_metadata_stores(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_pager(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], next_page_token="def", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[metadata_store.MetadataStore(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_metadata_stores(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, metadata_store.MetadataStore) for i in responses) + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_pages(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], next_page_token="def", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[metadata_store.MetadataStore(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_metadata_stores(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_metadata_store( + transport: str = "grpc", request_type=metadata_service.DeleteMetadataStoreRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_metadata_store_from_dict(): + test_delete_metadata_store(request_type=dict) + + +def test_delete_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + client.delete_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + +@pytest.mark.asyncio +async def test_delete_metadata_store_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.DeleteMetadataStoreRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_metadata_store_async_from_dict(): + await test_delete_metadata_store_async(request_type=dict) + + +def test_delete_metadata_store_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteMetadataStoreRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteMetadataStoreRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_metadata_store_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_metadata_store(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_metadata_store_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_metadata_store( + metadata_service.DeleteMetadataStoreRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_metadata_store(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_metadata_store( + metadata_service.DeleteMetadataStoreRequest(), name="name_value", + ) + + +def test_create_artifact( + transport: str = "grpc", request_type=metadata_service.CreateArtifactRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_artifact.Artifact( + name="name_value", + display_name="display_name_value", + uri="uri_value", + etag="etag_value", + state=gca_artifact.Artifact.State.PENDING, + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + + response = client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateArtifactRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gca_artifact.Artifact) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.uri == "uri_value" + + assert response.etag == "etag_value" + + assert response.state == gca_artifact.Artifact.State.PENDING + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +def test_create_artifact_from_dict(): + test_create_artifact(request_type=dict) + + +def test_create_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + client.create_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateArtifactRequest() + + +@pytest.mark.asyncio +async def test_create_artifact_async( + transport: str = "grpc_asyncio", request_type=metadata_service.CreateArtifactRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_artifact.Artifact( + name="name_value", + display_name="display_name_value", + uri="uri_value", + etag="etag_value", + state=gca_artifact.Artifact.State.PENDING, + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + + response = await client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_artifact.Artifact) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.uri == "uri_value" + + assert response.etag == "etag_value" + + assert response.state == gca_artifact.Artifact.State.PENDING + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_create_artifact_async_from_dict(): + await test_create_artifact_async(request_type=dict) + + +def test_create_artifact_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateArtifactRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + call.return_value = gca_artifact.Artifact() + + client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_artifact_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateArtifactRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_artifact.Artifact() + ) + + await client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_artifact_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_artifact( + parent="parent_value", + artifact=gca_artifact.Artifact(name="name_value"), + artifact_id="artifact_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].artifact == gca_artifact.Artifact(name="name_value") + + assert args[0].artifact_id == "artifact_id_value" + + +def test_create_artifact_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_artifact( + metadata_service.CreateArtifactRequest(), + parent="parent_value", + artifact=gca_artifact.Artifact(name="name_value"), + artifact_id="artifact_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_artifact_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_artifact.Artifact() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_artifact( + parent="parent_value", + artifact=gca_artifact.Artifact(name="name_value"), + artifact_id="artifact_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].artifact == gca_artifact.Artifact(name="name_value") + + assert args[0].artifact_id == "artifact_id_value" + + +@pytest.mark.asyncio +async def test_create_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_artifact( + metadata_service.CreateArtifactRequest(), + parent="parent_value", + artifact=gca_artifact.Artifact(name="name_value"), + artifact_id="artifact_id_value", + ) + + +def test_get_artifact( + transport: str = "grpc", request_type=metadata_service.GetArtifactRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact( + name="name_value", + display_name="display_name_value", + uri="uri_value", + etag="etag_value", + state=artifact.Artifact.State.PENDING, + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + + response = client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetArtifactRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, artifact.Artifact) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.uri == "uri_value" + + assert response.etag == "etag_value" + + assert response.state == artifact.Artifact.State.PENDING + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +def test_get_artifact_from_dict(): + test_get_artifact(request_type=dict) + + +def test_get_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + client.get_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetArtifactRequest() + + +@pytest.mark.asyncio +async def test_get_artifact_async( + transport: str = "grpc_asyncio", request_type=metadata_service.GetArtifactRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + artifact.Artifact( + name="name_value", + display_name="display_name_value", + uri="uri_value", + etag="etag_value", + state=artifact.Artifact.State.PENDING, + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + + response = await client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, artifact.Artifact) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.uri == "uri_value" + + assert response.etag == "etag_value" + + assert response.state == artifact.Artifact.State.PENDING + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_artifact_async_from_dict(): + await test_get_artifact_async(request_type=dict) + + +def test_get_artifact_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetArtifactRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + call.return_value = artifact.Artifact() + + client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_artifact_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetArtifactRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) + + await client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_artifact_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_artifact(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_artifact_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_artifact( + metadata_service.GetArtifactRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_artifact_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_artifact(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_artifact( + metadata_service.GetArtifactRequest(), name="name_value", + ) + + +def test_list_artifacts( + transport: str = "grpc", request_type=metadata_service.ListArtifactsRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListArtifactsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListArtifactsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListArtifactsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_artifacts_from_dict(): + test_list_artifacts(request_type=dict) + + +def test_list_artifacts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + client.list_artifacts() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListArtifactsRequest() + + +@pytest.mark.asyncio +async def test_list_artifacts_async( + transport: str = "grpc_asyncio", request_type=metadata_service.ListArtifactsRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListArtifactsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListArtifactsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListArtifactsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_artifacts_async_from_dict(): + await test_list_artifacts_async(request_type=dict) + + +def test_list_artifacts_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListArtifactsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + call.return_value = metadata_service.ListArtifactsResponse() + + client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_artifacts_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListArtifactsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListArtifactsResponse() + ) + + await client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_artifacts_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = metadata_service.ListArtifactsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_artifacts(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_artifacts_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_artifacts( + metadata_service.ListArtifactsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_artifacts_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListArtifactsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListArtifactsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_artifacts(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_artifacts_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_artifacts( + metadata_service.ListArtifactsRequest(), parent="parent_value", + ) + + +def test_list_artifacts_pager(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + artifact.Artifact(), + ], + next_page_token="abc", + ), + metadata_service.ListArtifactsResponse( + artifacts=[], next_page_token="def", + ), + metadata_service.ListArtifactsResponse( + artifacts=[artifact.Artifact(),], next_page_token="ghi", + ), + metadata_service.ListArtifactsResponse( + artifacts=[artifact.Artifact(), artifact.Artifact(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_artifacts(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, artifact.Artifact) for i in results) + + +def test_list_artifacts_pages(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + artifact.Artifact(), + ], + next_page_token="abc", + ), + metadata_service.ListArtifactsResponse( + artifacts=[], next_page_token="def", + ), + metadata_service.ListArtifactsResponse( + artifacts=[artifact.Artifact(),], next_page_token="ghi", + ), + metadata_service.ListArtifactsResponse( + artifacts=[artifact.Artifact(), artifact.Artifact(),], + ), + RuntimeError, + ) + pages = list(client.list_artifacts(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_artifacts_async_pager(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + artifact.Artifact(), + ], + next_page_token="abc", + ), + metadata_service.ListArtifactsResponse( + artifacts=[], next_page_token="def", + ), + metadata_service.ListArtifactsResponse( + artifacts=[artifact.Artifact(),], next_page_token="ghi", + ), + metadata_service.ListArtifactsResponse( + artifacts=[artifact.Artifact(), artifact.Artifact(),], + ), + RuntimeError, + ) + async_pager = await client.list_artifacts(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, artifact.Artifact) for i in responses) + + +@pytest.mark.asyncio +async def test_list_artifacts_async_pages(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + artifact.Artifact(), + ], + next_page_token="abc", + ), + metadata_service.ListArtifactsResponse( + artifacts=[], next_page_token="def", + ), + metadata_service.ListArtifactsResponse( + artifacts=[artifact.Artifact(),], next_page_token="ghi", + ), + metadata_service.ListArtifactsResponse( + artifacts=[artifact.Artifact(), artifact.Artifact(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_artifacts(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_artifact( + transport: str = "grpc", request_type=metadata_service.UpdateArtifactRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact( + name="name_value", + display_name="display_name_value", + uri="uri_value", + etag="etag_value", + state=gca_artifact.Artifact.State.PENDING, + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + + response = client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.UpdateArtifactRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, gca_artifact.Artifact) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.uri == "uri_value" + + assert response.etag == "etag_value" + + assert response.state == gca_artifact.Artifact.State.PENDING + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +def test_update_artifact_from_dict(): + test_update_artifact(request_type=dict) + + +def test_update_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_artifact), "__call__") as call: + client.update_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.UpdateArtifactRequest() + + +@pytest.mark.asyncio +async def test_update_artifact_async( + transport: str = "grpc_asyncio", request_type=metadata_service.UpdateArtifactRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_artifact), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_artifact.Artifact( + name="name_value", + display_name="display_name_value", + uri="uri_value", + etag="etag_value", + state=gca_artifact.Artifact.State.PENDING, + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + + response = await client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.UpdateArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_artifact.Artifact) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.uri == "uri_value" + + assert response.etag == "etag_value" + + assert response.state == gca_artifact.Artifact.State.PENDING + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_update_artifact_async_from_dict(): + await test_update_artifact_async(request_type=dict) + + +def test_update_artifact_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateArtifactRequest() + request.artifact.name = "artifact.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_artifact), "__call__") as call: + call.return_value = gca_artifact.Artifact() + + client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "artifact.name=artifact.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_artifact_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateArtifactRequest() + request.artifact.name = "artifact.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_artifact), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_artifact.Artifact() + ) + + await client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "artifact.name=artifact.name/value",) in kw[ + "metadata" + ] + + +def test_update_artifact_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_artifact( + artifact=gca_artifact.Artifact(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].artifact == gca_artifact.Artifact(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_artifact_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_artifact( + metadata_service.UpdateArtifactRequest(), + artifact=gca_artifact.Artifact(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_artifact_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_artifact), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_artifact.Artifact() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_artifact( + artifact=gca_artifact.Artifact(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].artifact == gca_artifact.Artifact(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_artifact( + metadata_service.UpdateArtifactRequest(), + artifact=gca_artifact.Artifact(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_create_context( + transport: str = "grpc", request_type=metadata_service.CreateContextRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context( + name="name_value", + display_name="display_name_value", + etag="etag_value", + parent_contexts=["parent_contexts_value"], + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + + response = client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateContextRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, gca_context.Context) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + assert response.parent_contexts == ["parent_contexts_value"] + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +def test_create_context_from_dict(): + test_create_context(request_type=dict) + + +def test_create_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_context), "__call__") as call: + client.create_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateContextRequest() + + +@pytest.mark.asyncio +async def test_create_context_async( + transport: str = "grpc_asyncio", request_type=metadata_service.CreateContextRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_context), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_context.Context( + name="name_value", + display_name="display_name_value", + etag="etag_value", + parent_contexts=["parent_contexts_value"], + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + + response = await client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_context.Context) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + assert response.parent_contexts == ["parent_contexts_value"] + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_create_context_async_from_dict(): + await test_create_context_async(request_type=dict) + + +def test_create_context_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateContextRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_context), "__call__") as call: + call.return_value = gca_context.Context() + + client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_context_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateContextRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_context), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + + await client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_context_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_context( + parent="parent_value", + context=gca_context.Context(name="name_value"), + context_id="context_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].context == gca_context.Context(name="name_value") + + assert args[0].context_id == "context_id_value" + + +def test_create_context_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_context( + metadata_service.CreateContextRequest(), + parent="parent_value", + context=gca_context.Context(name="name_value"), + context_id="context_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_context_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_context( + parent="parent_value", + context=gca_context.Context(name="name_value"), + context_id="context_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].context == gca_context.Context(name="name_value") + + assert args[0].context_id == "context_id_value" + + +@pytest.mark.asyncio +async def test_create_context_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_context( + metadata_service.CreateContextRequest(), + parent="parent_value", + context=gca_context.Context(name="name_value"), + context_id="context_id_value", + ) + + +def test_get_context( + transport: str = "grpc", request_type=metadata_service.GetContextRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context( + name="name_value", + display_name="display_name_value", + etag="etag_value", + parent_contexts=["parent_contexts_value"], + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + + response = client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetContextRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, context.Context) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + assert response.parent_contexts == ["parent_contexts_value"] + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +def test_get_context_from_dict(): + test_get_context(request_type=dict) + + +def test_get_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + client.get_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetContextRequest() + + +@pytest.mark.asyncio +async def test_get_context_async( + transport: str = "grpc_asyncio", request_type=metadata_service.GetContextRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + context.Context( + name="name_value", + display_name="display_name_value", + etag="etag_value", + parent_contexts=["parent_contexts_value"], + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + + response = await client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, context.Context) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + assert response.parent_contexts == ["parent_contexts_value"] + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_context_async_from_dict(): + await test_get_context_async(request_type=dict) + + +def test_get_context_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetContextRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + call.return_value = context.Context() + + client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_context_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetContextRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) + + await client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_context_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_context(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_context_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_context( + metadata_service.GetContextRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_context_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_context(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_context_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_context( + metadata_service.GetContextRequest(), name="name_value", + ) + + +def test_list_contexts( + transport: str = "grpc", request_type=metadata_service.ListContextsRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListContextsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListContextsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_contexts_from_dict(): + test_list_contexts(request_type=dict) + + +def test_list_contexts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + client.list_contexts() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListContextsRequest() + + +@pytest.mark.asyncio +async def test_list_contexts_async( + transport: str = "grpc_asyncio", request_type=metadata_service.ListContextsRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListContextsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListContextsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_contexts_async_from_dict(): + await test_list_contexts_async(request_type=dict) + + +def test_list_contexts_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListContextsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + call.return_value = metadata_service.ListContextsResponse() + + client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_contexts_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListContextsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListContextsResponse() + ) + + await client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_contexts_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.list_contexts(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_contexts_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_contexts( + metadata_service.ListContextsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_contexts_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListContextsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_contexts(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_contexts_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_contexts( + metadata_service.ListContextsRequest(), parent="parent_value", + ) + + +def test_list_contexts_pager(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(), context.Context(),], + next_page_token="abc", + ), + metadata_service.ListContextsResponse(contexts=[], next_page_token="def",), + metadata_service.ListContextsResponse( + contexts=[context.Context(),], next_page_token="ghi", + ), + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_contexts(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, context.Context) for i in results) + + +def test_list_contexts_pages(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_contexts), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(), context.Context(),], + next_page_token="abc", + ), + metadata_service.ListContextsResponse(contexts=[], next_page_token="def",), + metadata_service.ListContextsResponse( + contexts=[context.Context(),], next_page_token="ghi", + ), + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(),], + ), + RuntimeError, + ) + pages = list(client.list_contexts(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_contexts_async_pager(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(), context.Context(),], + next_page_token="abc", + ), + metadata_service.ListContextsResponse(contexts=[], next_page_token="def",), + metadata_service.ListContextsResponse( + contexts=[context.Context(),], next_page_token="ghi", + ), + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(),], + ), + RuntimeError, + ) + async_pager = await client.list_contexts(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, context.Context) for i in responses) + + +@pytest.mark.asyncio +async def test_list_contexts_async_pages(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_contexts), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(), context.Context(),], + next_page_token="abc", + ), + metadata_service.ListContextsResponse(contexts=[], next_page_token="def",), + metadata_service.ListContextsResponse( + contexts=[context.Context(),], next_page_token="ghi", + ), + metadata_service.ListContextsResponse( + contexts=[context.Context(), context.Context(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_contexts(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_context( + transport: str = "grpc", request_type=metadata_service.UpdateContextRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context( + name="name_value", + display_name="display_name_value", + etag="etag_value", + parent_contexts=["parent_contexts_value"], + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + + response = client.update_context(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.UpdateContextRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gca_context.Context) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + assert response.parent_contexts == ["parent_contexts_value"] + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +def test_update_context_from_dict(): + test_update_context(request_type=dict) + + +def test_update_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + client.update_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.UpdateContextRequest() + + +@pytest.mark.asyncio +async def test_update_context_async( + transport: str = "grpc_asyncio", request_type=metadata_service.UpdateContextRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_context.Context( + name="name_value", + display_name="display_name_value", + etag="etag_value", + parent_contexts=["parent_contexts_value"], + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + + response = await client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.UpdateContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_context.Context) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + assert response.parent_contexts == ["parent_contexts_value"] + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_update_context_async_from_dict(): + await test_update_context_async(request_type=dict) + + +def test_update_context_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateContextRequest() + request.context.name = "context.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + call.return_value = gca_context.Context() + + client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context.name=context.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_context_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateContextRequest() + request.context.name = "context.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + + await client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context.name=context.name/value",) in kw[ + "metadata" + ] + + +def test_update_context_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_context( + context=gca_context.Context(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].context == gca_context.Context(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_context_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_context( + metadata_service.UpdateContextRequest(), + context=gca_context.Context(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_context_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_context( + context=gca_context.Context(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].context == gca_context.Context(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_context_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_context( + metadata_service.UpdateContextRequest(), + context=gca_context.Context(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_context( + transport: str = "grpc", request_type=metadata_service.DeleteContextRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.DeleteContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_context_from_dict(): + test_delete_context(request_type=dict) + + +def test_delete_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + client.delete_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.DeleteContextRequest() + + +@pytest.mark.asyncio +async def test_delete_context_async( + transport: str = "grpc_asyncio", request_type=metadata_service.DeleteContextRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.DeleteContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_context_async_from_dict(): + await test_delete_context_async(request_type=dict) + + +def test_delete_context_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.DeleteContextRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_context_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteContextRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_context_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_context(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_context_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_context( + metadata_service.DeleteContextRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_context_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_context), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_context(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_context_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_context( + metadata_service.DeleteContextRequest(), name="name_value", + ) + + +def test_add_context_artifacts_and_executions( + transport: str = "grpc", + request_type=metadata_service.AddContextArtifactsAndExecutionsRequest, +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + + response = client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance( + response, metadata_service.AddContextArtifactsAndExecutionsResponse + ) + + +def test_add_context_artifacts_and_executions_from_dict(): + test_add_context_artifacts_and_executions(request_type=dict) + + +def test_add_context_artifacts_and_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + client.add_context_artifacts_and_executions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.AddContextArtifactsAndExecutionsRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextArtifactsAndExecutionsResponse() + ) + + response = await client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance( + response, metadata_service.AddContextArtifactsAndExecutionsResponse + ) + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_async_from_dict(): + await test_add_context_artifacts_and_executions_async(request_type=dict) + + +def test_add_context_artifacts_and_executions_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextArtifactsAndExecutionsRequest() + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + + client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextArtifactsAndExecutionsRequest() + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextArtifactsAndExecutionsResponse() + ) + + await client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +def test_add_context_artifacts_and_executions_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_context_artifacts_and_executions( + context="context_value", + artifacts=["artifacts_value"], + executions=["executions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].context == "context_value" + + assert args[0].artifacts == ["artifacts_value"] + + assert args[0].executions == ["executions_value"] + + +def test_add_context_artifacts_and_executions_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.add_context_artifacts_and_executions( + metadata_service.AddContextArtifactsAndExecutionsRequest(), + context="context_value", + artifacts=["artifacts_value"], + executions=["executions_value"], + ) + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextArtifactsAndExecutionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.add_context_artifacts_and_executions( + context="context_value", + artifacts=["artifacts_value"], + executions=["executions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].context == "context_value" + + assert args[0].artifacts == ["artifacts_value"] + + assert args[0].executions == ["executions_value"] + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.add_context_artifacts_and_executions( + metadata_service.AddContextArtifactsAndExecutionsRequest(), + context="context_value", + artifacts=["artifacts_value"], + executions=["executions_value"], + ) + + +def test_add_context_children( + transport: str = "grpc", request_type=metadata_service.AddContextChildrenRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextChildrenResponse() + + response = client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.AddContextChildrenRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, metadata_service.AddContextChildrenResponse) + + +def test_add_context_children_from_dict(): + test_add_context_children(request_type=dict) + + +def test_add_context_children_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + client.add_context_children() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.AddContextChildrenRequest() + + +@pytest.mark.asyncio +async def test_add_context_children_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.AddContextChildrenRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextChildrenResponse() + ) + + response = await client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.AddContextChildrenRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_service.AddContextChildrenResponse) + + +@pytest.mark.asyncio +async def test_add_context_children_async_from_dict(): + await test_add_context_children_async(request_type=dict) + + +def test_add_context_children_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextChildrenRequest() + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + call.return_value = metadata_service.AddContextChildrenResponse() + + client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_add_context_children_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextChildrenRequest() + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddContextChildrenResponse() + ) + + await client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +def test_add_context_children_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+        call.return_value = metadata_service.AddContextChildrenResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.add_context_children(
+            context="context_value", child_contexts=["child_contexts_value"],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].context == "context_value"
+
+        assert args[0].child_contexts == ["child_contexts_value"]
+
+
+def test_add_context_children_flattened_error():
+    client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.add_context_children(
+            metadata_service.AddContextChildrenRequest(),
+            context="context_value",
+            child_contexts=["child_contexts_value"],
+        )
+
+
+@pytest.mark.asyncio
+async def test_add_context_children_flattened_async():
+    client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.add_context_children), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        # Wrap the response in a fake gRPC awaitable for the async client.
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            metadata_service.AddContextChildrenResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.add_context_children(
+            context="context_value", child_contexts=["child_contexts_value"],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].context == "context_value" + + assert args[0].child_contexts == ["child_contexts_value"] + + +@pytest.mark.asyncio +async def test_add_context_children_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.add_context_children( + metadata_service.AddContextChildrenRequest(), + context="context_value", + child_contexts=["child_contexts_value"], + ) + + +def test_query_context_lineage_subgraph( + transport: str = "grpc", + request_type=metadata_service.QueryContextLineageSubgraphRequest, +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + response = client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_context_lineage_subgraph_from_dict(): + test_query_context_lineage_subgraph(request_type=dict) + + +def test_query_context_lineage_subgraph_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + client.query_context_lineage_subgraph() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.QueryContextLineageSubgraphRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + + response = await client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_async_from_dict(): + await test_query_context_lineage_subgraph_async(request_type=dict) + + +def test_query_context_lineage_subgraph_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryContextLineageSubgraphRequest() + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + call.return_value = lineage_subgraph.LineageSubgraph() + + client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryContextLineageSubgraphRequest() + request.context = "context/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + + await client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "context=context/value",) in kw["metadata"] + + +def test_query_context_lineage_subgraph_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_context_lineage_subgraph(context="context_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].context == "context_value" + + +def test_query_context_lineage_subgraph_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.query_context_lineage_subgraph(
+            metadata_service.QueryContextLineageSubgraphRequest(),
+            context="context_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_flattened_async():
+    client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.query_context_lineage_subgraph), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        # Wrap the response in a fake gRPC awaitable for the async client.
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            lineage_subgraph.LineageSubgraph()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.query_context_lineage_subgraph(context="context_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].context == "context_value"
+
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_flattened_error_async():
+    client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.query_context_lineage_subgraph(
+            metadata_service.QueryContextLineageSubgraphRequest(),
+            context="context_value",
+        )
+
+
+def test_create_execution(
+    transport: str = "grpc", request_type=metadata_service.CreateExecutionRequest
+):
+    client = MetadataServiceClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution( + name="name_value", + display_name="display_name_value", + state=gca_execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + + response = client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateExecutionRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gca_execution.Execution) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == gca_execution.Execution.State.NEW + + assert response.etag == "etag_value" + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +def test_create_execution_from_dict(): + test_create_execution(request_type=dict) + + +def test_create_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + client.create_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateExecutionRequest() + + +@pytest.mark.asyncio +async def test_create_execution_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.CreateExecutionRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution( + name="name_value", + display_name="display_name_value", + state=gca_execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + + response = await client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_execution.Execution) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == gca_execution.Execution.State.NEW + + assert response.etag == "etag_value" + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_create_execution_async_from_dict(): + await test_create_execution_async(request_type=dict) + + +def test_create_execution_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateExecutionRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + call.return_value = gca_execution.Execution() + + client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_execution_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateExecutionRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution() + ) + + await client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_execution_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_execution( + parent="parent_value", + execution=gca_execution.Execution(name="name_value"), + execution_id="execution_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].execution == gca_execution.Execution(name="name_value") + + assert args[0].execution_id == "execution_id_value" + + +def test_create_execution_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.create_execution(
+            metadata_service.CreateExecutionRequest(),
+            parent="parent_value",
+            execution=gca_execution.Execution(name="name_value"),
+            execution_id="execution_id_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_execution_flattened_async():
+    client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        # Wrap the response in a fake gRPC awaitable for the async client.
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            gca_execution.Execution()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_execution(
+            parent="parent_value",
+            execution=gca_execution.Execution(name="name_value"),
+            execution_id="execution_id_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].execution == gca_execution.Execution(name="name_value")
+
+        assert args[0].execution_id == "execution_id_value"
+
+
+@pytest.mark.asyncio
+async def test_create_execution_flattened_error_async():
+    client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.create_execution( + metadata_service.CreateExecutionRequest(), + parent="parent_value", + execution=gca_execution.Execution(name="name_value"), + execution_id="execution_id_value", + ) + + +def test_get_execution( + transport: str = "grpc", request_type=metadata_service.GetExecutionRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution( + name="name_value", + display_name="display_name_value", + state=execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + + response = client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetExecutionRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, execution.Execution) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == execution.Execution.State.NEW + + assert response.etag == "etag_value" + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +def test_get_execution_from_dict(): + test_get_execution(request_type=dict) + + +def test_get_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + client.get_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetExecutionRequest() + + +@pytest.mark.asyncio +async def test_get_execution_async( + transport: str = "grpc_asyncio", request_type=metadata_service.GetExecutionRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + execution.Execution( + name="name_value", + display_name="display_name_value", + state=execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + + response = await client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, execution.Execution) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == execution.Execution.State.NEW + + assert response.etag == "etag_value" + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_execution_async_from_dict(): + await test_get_execution_async(request_type=dict) + + +def test_get_execution_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetExecutionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + call.return_value = execution.Execution() + + client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_execution_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetExecutionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) + + await client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_execution_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_execution(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_get_execution_flattened_error():
+    client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_execution(
+            metadata_service.GetExecutionRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_execution_flattened_async():
+    client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        # Wrap the response in a fake gRPC awaitable for the async client.
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_execution(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_get_execution_flattened_error_async():
+    client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.get_execution( + metadata_service.GetExecutionRequest(), name="name_value", + ) + + +def test_list_executions( + transport: str = "grpc", request_type=metadata_service.ListExecutionsRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListExecutionsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListExecutionsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_executions_from_dict(): + test_list_executions(request_type=dict) + + +def test_list_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + client.list_executions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListExecutionsRequest() + + +@pytest.mark.asyncio +async def test_list_executions_async( + transport: str = "grpc_asyncio", request_type=metadata_service.ListExecutionsRequest +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListExecutionsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExecutionsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_executions_async_from_dict(): + await test_list_executions_async(request_type=dict) + + +def test_list_executions_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListExecutionsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + call.return_value = metadata_service.ListExecutionsResponse() + + client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_executions_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListExecutionsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListExecutionsResponse() + ) + + await client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_executions_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = metadata_service.ListExecutionsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_executions(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_executions_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_executions( + metadata_service.ListExecutionsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_executions_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListExecutionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_executions(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_executions_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_executions( + metadata_service.ListExecutionsRequest(), parent="parent_value", + ) + + +def test_list_executions_pager(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token="abc", + ), + metadata_service.ListExecutionsResponse( + executions=[], next_page_token="def", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(),], next_page_token="ghi", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(), execution.Execution(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_executions(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, execution.Execution) for i in results) + + +def test_list_executions_pages(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_executions), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token="abc", + ), + metadata_service.ListExecutionsResponse( + executions=[], next_page_token="def", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(),], next_page_token="ghi", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(), execution.Execution(),], + ), + RuntimeError, + ) + pages = list(client.list_executions(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_executions_async_pager(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token="abc", + ), + metadata_service.ListExecutionsResponse( + executions=[], next_page_token="def", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(),], next_page_token="ghi", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(), execution.Execution(),], + ), + RuntimeError, + ) + async_pager = await client.list_executions(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, execution.Execution) for i in responses) + + +@pytest.mark.asyncio +async def test_list_executions_async_pages(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token="abc", + ), + metadata_service.ListExecutionsResponse( + executions=[], next_page_token="def", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(),], next_page_token="ghi", + ), + metadata_service.ListExecutionsResponse( + executions=[execution.Execution(), execution.Execution(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_executions(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_execution( + transport: str = "grpc", request_type=metadata_service.UpdateExecutionRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution( + name="name_value", + display_name="display_name_value", + state=gca_execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + + response = client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.UpdateExecutionRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, gca_execution.Execution) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == gca_execution.Execution.State.NEW + + assert response.etag == "etag_value" + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +def test_update_execution_from_dict(): + test_update_execution(request_type=dict) + + +def test_update_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + client.update_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.UpdateExecutionRequest() + + +@pytest.mark.asyncio +async def test_update_execution_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.UpdateExecutionRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution( + name="name_value", + display_name="display_name_value", + state=gca_execution.Execution.State.NEW, + etag="etag_value", + schema_title="schema_title_value", + schema_version="schema_version_value", + description="description_value", + ) + ) + + response = await client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.UpdateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == gca_execution.Execution.State.NEW + + assert response.etag == "etag_value" + + assert response.schema_title == "schema_title_value" + + assert response.schema_version == "schema_version_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_update_execution_async_from_dict(): + await test_update_execution_async(request_type=dict) + + +def test_update_execution_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateExecutionRequest() + request.execution.name = "execution.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + call.return_value = gca_execution.Execution() + + client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution.name=execution.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_execution_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateExecutionRequest() + request.execution.name = "execution.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution() + ) + + await client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution.name=execution.name/value",) in kw[ + "metadata" + ] + + +def test_update_execution_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_execution( + execution=gca_execution.Execution(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].execution == gca_execution.Execution(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_execution_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_execution( + metadata_service.UpdateExecutionRequest(), + execution=gca_execution.Execution(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_execution_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_execution), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_execution.Execution() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_execution( + execution=gca_execution.Execution(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].execution == gca_execution.Execution(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_execution_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_execution( + metadata_service.UpdateExecutionRequest(), + execution=gca_execution.Execution(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_add_execution_events( + transport: str = "grpc", request_type=metadata_service.AddExecutionEventsRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + + response = client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.AddExecutionEventsRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, metadata_service.AddExecutionEventsResponse) + + +def test_add_execution_events_from_dict(): + test_add_execution_events(request_type=dict) + + +def test_add_execution_events_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + client.add_execution_events() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.AddExecutionEventsRequest() + + +@pytest.mark.asyncio +async def test_add_execution_events_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.AddExecutionEventsRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddExecutionEventsResponse() + ) + + response = await client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.AddExecutionEventsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_service.AddExecutionEventsResponse) + + +@pytest.mark.asyncio +async def test_add_execution_events_async_from_dict(): + await test_add_execution_events_async(request_type=dict) + + +def test_add_execution_events_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddExecutionEventsRequest() + request.execution = "execution/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + call.return_value = metadata_service.AddExecutionEventsResponse() + + client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_add_execution_events_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddExecutionEventsRequest() + request.execution = "execution/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddExecutionEventsResponse() + ) + + await client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"] + + +def test_add_execution_events_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_execution_events( + execution="execution_value", + events=[event.Event(artifact="artifact_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].execution == "execution_value" + + assert args[0].events == [event.Event(artifact="artifact_value")] + + +def test_add_execution_events_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_execution_events( + metadata_service.AddExecutionEventsRequest(), + execution="execution_value", + events=[event.Event(artifact="artifact_value")], + ) + + +@pytest.mark.asyncio +async def test_add_execution_events_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_execution_events), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.AddExecutionEventsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.add_execution_events( + execution="execution_value", + events=[event.Event(artifact="artifact_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].execution == "execution_value" + + assert args[0].events == [event.Event(artifact="artifact_value")] + + +@pytest.mark.asyncio +async def test_add_execution_events_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.add_execution_events( + metadata_service.AddExecutionEventsRequest(), + execution="execution_value", + events=[event.Event(artifact="artifact_value")], + ) + + +def test_query_execution_inputs_and_outputs( + transport: str = "grpc", + request_type=metadata_service.QueryExecutionInputsAndOutputsRequest, +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + response = client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_execution_inputs_and_outputs_from_dict(): + test_query_execution_inputs_and_outputs(request_type=dict) + + +def test_query_execution_inputs_and_outputs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + client.query_execution_inputs_and_outputs() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.QueryExecutionInputsAndOutputsRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + + response = await client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_async_from_dict(): + await test_query_execution_inputs_and_outputs_async(request_type=dict) + + +def test_query_execution_inputs_and_outputs_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryExecutionInputsAndOutputsRequest() + request.execution = "execution/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + call.return_value = lineage_subgraph.LineageSubgraph() + + client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryExecutionInputsAndOutputsRequest() + request.execution = "execution/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + + await client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"] + + +def test_query_execution_inputs_and_outputs_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_execution_inputs_and_outputs(execution="execution_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].execution == "execution_value" + + +def test_query_execution_inputs_and_outputs_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.query_execution_inputs_and_outputs( + metadata_service.QueryExecutionInputsAndOutputsRequest(), + execution="execution_value", + ) + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_execution_inputs_and_outputs( + execution="execution_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].execution == "execution_value" + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.query_execution_inputs_and_outputs( + metadata_service.QueryExecutionInputsAndOutputsRequest(), + execution="execution_value", + ) + + +def test_create_metadata_schema( + transport: str = "grpc", request_type=metadata_service.CreateMetadataSchemaRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema( + name="name_value", + schema_version="schema_version_value", + schema="schema_value", + schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description="description_value", + ) + + response = client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateMetadataSchemaRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, gca_metadata_schema.MetadataSchema) + + assert response.name == "name_value" + + assert response.schema_version == "schema_version_value" + + assert response.schema == "schema_value" + + assert ( + response.schema_type + == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + ) + + assert response.description == "description_value" + + +def test_create_metadata_schema_from_dict(): + test_create_metadata_schema(request_type=dict) + + +def test_create_metadata_schema_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + client.create_metadata_schema() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateMetadataSchemaRequest() + + +@pytest.mark.asyncio +async def test_create_metadata_schema_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.CreateMetadataSchemaRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_metadata_schema.MetadataSchema( + name="name_value", + schema_version="schema_version_value", + schema="schema_value", + schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description="description_value", + ) + ) + + response = await client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.CreateMetadataSchemaRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_metadata_schema.MetadataSchema) + + assert response.name == "name_value" + + assert response.schema_version == "schema_version_value" + + assert response.schema == "schema_value" + + assert ( + response.schema_type + == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + ) + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_create_metadata_schema_async_from_dict(): + await test_create_metadata_schema_async(request_type=dict) + + +def test_create_metadata_schema_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataSchemaRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + call.return_value = gca_metadata_schema.MetadataSchema() + + client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_metadata_schema_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataSchemaRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_metadata_schema.MetadataSchema() + ) + + await client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_metadata_schema_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metadata_schema( + parent="parent_value", + metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"), + metadata_schema_id="metadata_schema_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema( + name="name_value" + ) + + assert args[0].metadata_schema_id == "metadata_schema_id_value" + + +def test_create_metadata_schema_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_metadata_schema( + metadata_service.CreateMetadataSchemaRequest(), + parent="parent_value", + metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"), + metadata_schema_id="metadata_schema_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_metadata_schema_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_metadata_schema.MetadataSchema() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_metadata_schema( + parent="parent_value", + metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"), + metadata_schema_id="metadata_schema_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema( + name="name_value" + ) + + assert args[0].metadata_schema_id == "metadata_schema_id_value" + + +@pytest.mark.asyncio +async def test_create_metadata_schema_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_metadata_schema( + metadata_service.CreateMetadataSchemaRequest(), + parent="parent_value", + metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"), + metadata_schema_id="metadata_schema_id_value", + ) + + +def test_get_metadata_schema( + transport: str = "grpc", request_type=metadata_service.GetMetadataSchemaRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema( + name="name_value", + schema_version="schema_version_value", + schema="schema_value", + schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description="description_value", + ) + + response = client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetMetadataSchemaRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, metadata_schema.MetadataSchema) + + assert response.name == "name_value" + + assert response.schema_version == "schema_version_value" + + assert response.schema == "schema_value" + + assert ( + response.schema_type + == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + ) + + assert response.description == "description_value" + + +def test_get_metadata_schema_from_dict(): + test_get_metadata_schema(request_type=dict) + + +def test_get_metadata_schema_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + client.get_metadata_schema() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetMetadataSchemaRequest() + + +@pytest.mark.asyncio +async def test_get_metadata_schema_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.GetMetadataSchemaRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_schema.MetadataSchema( + name="name_value", + schema_version="schema_version_value", + schema="schema_value", + schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description="description_value", + ) + ) + + response = await client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.GetMetadataSchemaRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_schema.MetadataSchema) + + assert response.name == "name_value" + + assert response.schema_version == "schema_version_value" + + assert response.schema == "schema_value" + + assert ( + response.schema_type + == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + ) + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_metadata_schema_async_from_dict(): + await test_get_metadata_schema_async(request_type=dict) + + +def test_get_metadata_schema_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataSchemaRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + call.return_value = metadata_schema.MetadataSchema() + + client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_metadata_schema_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataSchemaRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_schema.MetadataSchema() + ) + + await client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_metadata_schema_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_metadata_schema(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_metadata_schema_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_metadata_schema( + metadata_service.GetMetadataSchemaRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_metadata_schema_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_schema.MetadataSchema() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_metadata_schema(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_metadata_schema_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_metadata_schema( + metadata_service.GetMetadataSchemaRequest(), name="name_value", + ) + + +def test_list_metadata_schemas( + transport: str = "grpc", request_type=metadata_service.ListMetadataSchemasRequest +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListMetadataSchemasRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListMetadataSchemasPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_metadata_schemas_from_dict(): + test_list_metadata_schemas(request_type=dict) + + +def test_list_metadata_schemas_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + client.list_metadata_schemas() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListMetadataSchemasRequest() + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.ListMetadataSchemasRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataSchemasResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.ListMetadataSchemasRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataSchemasAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async_from_dict(): + await test_list_metadata_schemas_async(request_type=dict) + + +def test_list_metadata_schemas_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListMetadataSchemasRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + call.return_value = metadata_service.ListMetadataSchemasResponse() + + client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataSchemasRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataSchemasResponse() + ) + + await client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_metadata_schemas_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_metadata_schemas(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_metadata_schemas_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_metadata_schemas( + metadata_service.ListMetadataSchemasRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + metadata_service.ListMetadataSchemasResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_metadata_schemas(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_metadata_schemas( + metadata_service.ListMetadataSchemasRequest(), parent="parent_value", + ) + + +def test_list_metadata_schemas_pager(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], next_page_token="def", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[metadata_schema.MetadataSchema(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_metadata_schemas(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, metadata_schema.MetadataSchema) for i in results) + + +def test_list_metadata_schemas_pages(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual 
call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], next_page_token="def", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[metadata_schema.MetadataSchema(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + pages = list(client.list_metadata_schemas(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async_pager(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], next_page_token="def", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[metadata_schema.MetadataSchema(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_metadata_schemas(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, metadata_schema.MetadataSchema) for i in responses) + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async_pages(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token="abc", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], next_page_token="def", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[metadata_schema.MetadataSchema(),], + next_page_token="ghi", + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_metadata_schemas(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_query_artifact_lineage_subgraph( + transport: str = "grpc", + request_type=metadata_service.QueryArtifactLineageSubgraphRequest, +): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + response = client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_artifact_lineage_subgraph_from_dict(): + test_query_artifact_lineage_subgraph(request_type=dict) + + +def test_query_artifact_lineage_subgraph_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + client.query_artifact_lineage_subgraph() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_async( + transport: str = "grpc_asyncio", + request_type=metadata_service.QueryArtifactLineageSubgraphRequest, +): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + + response = await client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_async_from_dict(): + await test_query_artifact_lineage_subgraph_async(request_type=dict) + + +def test_query_artifact_lineage_subgraph_field_headers(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryArtifactLineageSubgraphRequest() + request.artifact = "artifact/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + call.return_value = lineage_subgraph.LineageSubgraph() + + client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "artifact=artifact/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_field_headers_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryArtifactLineageSubgraphRequest() + request.artifact = "artifact/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + + await client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "artifact=artifact/value",) in kw["metadata"] + + +def test_query_artifact_lineage_subgraph_flattened(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_artifact_lineage_subgraph(artifact="artifact_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].artifact == "artifact_value" + + +def test_query_artifact_lineage_subgraph_flattened_error(): + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.query_artifact_lineage_subgraph( + metadata_service.QueryArtifactLineageSubgraphRequest(), + artifact="artifact_value", + ) + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_flattened_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + lineage_subgraph.LineageSubgraph() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_artifact_lineage_subgraph( + artifact="artifact_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].artifact == "artifact_value" + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_flattened_error_async(): + client = MetadataServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.query_artifact_lineage_subgraph( + metadata_service.QueryArtifactLineageSubgraphRequest(), + artifact="artifact_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.MetadataServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = MetadataServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.MetadataServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = MetadataServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.MetadataServiceGrpcTransport,) + + +def test_metadata_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.MetadataServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_metadata_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.MetadataServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_metadata_store", + "get_metadata_store", + "list_metadata_stores", + "delete_metadata_store", + "create_artifact", + "get_artifact", + "list_artifacts", + "update_artifact", + "create_context", + "get_context", + "list_contexts", + "update_context", + "delete_context", + "add_context_artifacts_and_executions", + "add_context_children", + "query_context_lineage_subgraph", + "create_execution", + "get_execution", + "list_executions", + "update_execution", + "add_execution_events", + "query_execution_inputs_and_outputs", + "create_metadata_schema", + "get_metadata_schema", + "list_metadata_schemas", + "query_artifact_lineage_subgraph", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_metadata_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.MetadataServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_metadata_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.MetadataServiceTransport() + adc.assert_called_once() + + +def test_metadata_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + MetadataServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_metadata_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.MetadataServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +def test_metadata_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_metadata_service_host_no_port(): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_metadata_service_host_with_port(): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:8000" + + +def test_metadata_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.MetadataServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_metadata_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.MetadataServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +def test_metadata_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + 
"mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +def test_metadata_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_metadata_service_grpc_lro_client(): + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = 
client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_metadata_service_grpc_lro_async_client(): + client = MetadataServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_artifact_path(): + project = "squid" + location = "clam" + metadata_store = "whelk" + artifact = "octopus" + + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( + project=project, + location=location, + metadata_store=metadata_store, + artifact=artifact, + ) + actual = MetadataServiceClient.artifact_path( + project, location, metadata_store, artifact + ) + assert expected == actual + + +def test_parse_artifact_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "artifact": "mussel", + } + path = MetadataServiceClient.artifact_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_artifact_path(path) + assert expected == actual + + +def test_context_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + context = "abalone" + + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + actual = MetadataServiceClient.context_path( + project, location, metadata_store, context + ) + assert expected == actual + + +def test_parse_context_path(): + expected = { + "project": "squid", + "location": "clam", + "metadata_store": "whelk", + "context": "octopus", + } + path = MetadataServiceClient.context_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_context_path(path) + assert expected == actual + + +def test_execution_path(): + project = "oyster" + location = "nudibranch" + metadata_store = "cuttlefish" + execution = "mussel" + + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( + project=project, + location=location, + metadata_store=metadata_store, + execution=execution, + ) + actual = MetadataServiceClient.execution_path( + project, location, metadata_store, execution + ) + assert expected == actual + + +def test_parse_execution_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "metadata_store": "scallop", + "execution": "abalone", + } + path = MetadataServiceClient.execution_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_execution_path(path) + assert expected == actual + + +def test_metadata_schema_path(): + project = "squid" + location = "clam" + metadata_store = "whelk" + metadata_schema = "octopus" + + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format( + project=project, + location=location, + metadata_store=metadata_store, + metadata_schema=metadata_schema, + ) + actual = MetadataServiceClient.metadata_schema_path( + project, location, metadata_store, metadata_schema + ) + assert expected == actual + + +def test_parse_metadata_schema_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "metadata_schema": "mussel", + } + path = MetadataServiceClient.metadata_schema_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_metadata_schema_path(path) + assert expected == actual + + +def test_metadata_store_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format( + project=project, location=location, metadata_store=metadata_store, + ) + actual = MetadataServiceClient.metadata_store_path( + project, location, metadata_store + ) + assert expected == actual + + +def test_parse_metadata_store_path(): + expected = { + "project": "abalone", + "location": "squid", + "metadata_store": "clam", + } + path = MetadataServiceClient.metadata_store_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_metadata_store_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = MetadataServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = MetadataServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + + expected = "folders/{folder}".format(folder=folder,) + actual = MetadataServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = MetadataServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + + expected = "organizations/{organization}".format(organization=organization,) + actual = MetadataServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = MetadataServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + + expected = "projects/{project}".format(project=project,) + actual = MetadataServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = MetadataServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "scallop" + location = "abalone" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = MetadataServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = MetadataServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.MetadataServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = MetadataServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.MetadataServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = MetadataServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index 37ae2b65e8..f547beb6bf 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -95,7 +95,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,], + "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,] ) def test_migration_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() @@ -112,7 +112,7 @@ def test_migration_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,], + "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,] ) def test_migration_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -1551,19 +1551,21 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - dataset = "mussel" + location = "mussel" + dataset = 
"winkle" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", - "dataset": "nautilus", + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", } path = MigrationServiceClient.dataset_path(**expected) @@ -1573,21 +1575,19 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "scallop" - location = "abalone" - dataset = "squid" + project = "squid" + dataset = "clam" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "clam", - "location": "whelk", + "project": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py index 51cbd4583f..a31f13c873 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py @@ -101,9 +101,7 @@ def test__get_default_mtls_endpoint(): assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi -@pytest.mark.parametrize( - "client_class", [ModelServiceClient, ModelServiceAsyncClient,], -) +@pytest.mark.parametrize("client_class", 
[ModelServiceClient, ModelServiceAsyncClient,]) def test_model_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( @@ -118,9 +116,7 @@ def test_model_service_client_from_service_account_info(client_class): assert client.transport._host == "aiplatform.googleapis.com:443" -@pytest.mark.parametrize( - "client_class", [ModelServiceClient, ModelServiceAsyncClient,], -) +@pytest.mark.parametrize("client_class", [ModelServiceClient, ModelServiceAsyncClient,]) def test_model_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py index d1d65aecbd..59218c0ed9 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py @@ -43,20 +43,26 @@ ) from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers from google.cloud.aiplatform_v1beta1.services.pipeline_service import transports +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import context from google.cloud.aiplatform_v1beta1.types import deployed_model_ref from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import env_var +from google.cloud.aiplatform_v1beta1.types import execution from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import explanation_metadata from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from 
google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import pipeline_state from google.cloud.aiplatform_v1beta1.types import training_pipeline from google.cloud.aiplatform_v1beta1.types import ( training_pipeline as gca_training_pipeline, ) +from google.cloud.aiplatform_v1beta1.types import value from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import any_pb2 as gp_any # type: ignore @@ -111,7 +117,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [PipelineServiceClient, PipelineServiceAsyncClient,], + "client_class", [PipelineServiceClient, PipelineServiceAsyncClient,] ) def test_pipeline_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() @@ -128,7 +134,7 @@ def test_pipeline_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - "client_class", [PipelineServiceClient, PipelineServiceAsyncClient,], + "client_class", [PipelineServiceClient, PipelineServiceAsyncClient,] ) def test_pipeline_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -1808,6 +1814,1321 @@ async def test_cancel_training_pipeline_flattened_error_async(): ) +def test_create_pipeline_job( + transport: str = "grpc", request_type=pipeline_service.CreatePipelineJobRequest +): + client = PipelineServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + + response = client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gca_pipeline_job.PipelineJob) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + assert response.service_account == "service_account_value" + + assert response.network == "network_value" + + +def test_create_pipeline_job_from_dict(): + test_create_pipeline_job(request_type=dict) + + +def test_create_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + client.create_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + +@pytest.mark.asyncio +async def test_create_pipeline_job_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.CreatePipelineJobRequest, +): + client = PipelineServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + ) + + response = await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_pipeline_job.PipelineJob) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + assert response.service_account == "service_account_value" + + assert response.network == "network_value" + + +@pytest.mark.asyncio +async def test_create_pipeline_job_async_from_dict(): + await test_create_pipeline_job_async(request_type=dict) + + +def test_create_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + call.return_value = gca_pipeline_job.PipelineJob() + + client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_pipeline_job.PipelineJob() + ) + + await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_pipeline_job( + parent="parent_value", + pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"), + pipeline_job_id="pipeline_job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name="name_value") + + assert args[0].pipeline_job_id == "pipeline_job_id_value" + + +def test_create_pipeline_job_flattened_error(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_pipeline_job( + pipeline_service.CreatePipelineJobRequest(), + parent="parent_value", + pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"), + pipeline_job_id="pipeline_job_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_pipeline_job.PipelineJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_pipeline_job( + parent="parent_value", + pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"), + pipeline_job_id="pipeline_job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name="name_value") + + assert args[0].pipeline_job_id == "pipeline_job_id_value" + + +@pytest.mark.asyncio +async def test_create_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_pipeline_job( + pipeline_service.CreatePipelineJobRequest(), + parent="parent_value", + pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"), + pipeline_job_id="pipeline_job_id_value", + ) + + +def test_get_pipeline_job( + transport: str = "grpc", request_type=pipeline_service.GetPipelineJobRequest +): + client = PipelineServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + + response = client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.GetPipelineJobRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pipeline_job.PipelineJob) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + assert response.service_account == "service_account_value" + + assert response.network == "network_value" + + +def test_get_pipeline_job_from_dict(): + test_get_pipeline_job(request_type=dict) + + +def test_get_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + client.get_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.GetPipelineJobRequest() + + +@pytest.mark.asyncio +async def test_get_pipeline_job_async( + transport: str = "grpc_asyncio", request_type=pipeline_service.GetPipelineJobRequest +): + client = PipelineServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + ) + + response = await client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.GetPipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pipeline_job.PipelineJob) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + assert response.service_account == "service_account_value" + + assert response.network == "network_value" + + +@pytest.mark.asyncio +async def test_get_pipeline_job_async_from_dict(): + await test_get_pipeline_job_async(request_type=dict) + + +def test_get_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetPipelineJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + call.return_value = pipeline_job.PipelineJob() + + client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetPipelineJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_job.PipelineJob() + ) + + await client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_pipeline_job_flattened_error(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_pipeline_job( + pipeline_service.GetPipelineJobRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_job.PipelineJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_pipeline_job( + pipeline_service.GetPipelineJobRequest(), name="name_value", + ) + + +def test_list_pipeline_jobs( + transport: str = "grpc", request_type=pipeline_service.ListPipelineJobsRequest +): + client = PipelineServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListPipelineJobsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_pipeline_jobs_from_dict(): + test_list_pipeline_jobs(request_type=dict) + + +def test_list_pipeline_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + client.list_pipeline_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.ListPipelineJobsRequest, +): + client = PipelineServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_service.ListPipelineJobsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPipelineJobsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_from_dict(): + await test_list_pipeline_jobs_async(request_type=dict) + + +def test_list_pipeline_jobs_field_headers(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + call.return_value = pipeline_service.ListPipelineJobsResponse() + + client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_field_headers_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_service.ListPipelineJobsResponse() + ) + + await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_pipeline_jobs_flattened(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_pipeline_jobs(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_pipeline_jobs_flattened_error(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_pipeline_jobs( + pipeline_service.ListPipelineJobsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_flattened_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_service.ListPipelineJobsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_pipeline_jobs(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_flattened_error_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_pipeline_jobs( + pipeline_service.ListPipelineJobsRequest(), parent="parent_value", + ) + + +def test_list_pipeline_jobs_pager(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token="abc", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], next_page_token="def", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_pipeline_jobs(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, pipeline_job.PipelineJob) for i in results) + + +def test_list_pipeline_jobs_pages(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token="abc", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], next_page_token="def", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),], + ), + RuntimeError, + ) + pages = list(client.list_pipeline_jobs(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_pager(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token="abc", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], next_page_token="def", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),], + ), + RuntimeError, + ) + async_pager = await client.list_pipeline_jobs(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, pipeline_job.PipelineJob) for i in responses) + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_pages(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token="abc", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], next_page_token="def", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_pipeline_jobs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_pipeline_job( + transport: str = "grpc", request_type=pipeline_service.DeletePipelineJobRequest +): + client = PipelineServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_pipeline_job_from_dict(): + test_delete_pipeline_job(request_type=dict) + + +def test_delete_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + client.delete_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.DeletePipelineJobRequest, +): + client = PipelineServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async_from_dict(): + await test_delete_pipeline_job_async(request_type=dict) + + +def test_delete_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_pipeline_job_flattened_error(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_pipeline_job( + pipeline_service.DeletePipelineJobRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_pipeline_job( + pipeline_service.DeletePipelineJobRequest(), name="name_value", + ) + + +def test_cancel_pipeline_job( + transport: str = "grpc", request_type=pipeline_service.CancelPipelineJobRequest +): + client = PipelineServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_cancel_pipeline_job_from_dict(): + test_cancel_pipeline_job(request_type=dict) + + +def test_cancel_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + client.cancel_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.CancelPipelineJobRequest, +): + client = PipelineServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async_from_dict(): + await test_cancel_pipeline_job_async(request_type=dict) + + +def test_cancel_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + call.return_value = None + + client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_cancel_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_cancel_pipeline_job_flattened_error(): + client = PipelineServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_pipeline_job( + pipeline_service.CancelPipelineJobRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_pipeline_job( + pipeline_service.CancelPipelineJobRequest(), name="name_value", + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.PipelineServiceGrpcTransport( @@ -1910,6 +3231,11 @@ def test_pipeline_service_base_transport(): "list_training_pipelines", "delete_training_pipeline", "cancel_training_pipeline", + "create_pipeline_job", + "get_pipeline_job", + "list_pipeline_jobs", + "delete_pipeline_job", + "cancel_pipeline_job", ) for method in methods: with pytest.raises(NotImplementedError): @@ -2191,10 +3517,99 @@ def test_pipeline_service_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_endpoint_path(): +def test_artifact_path(): project = "squid" location = "clam" - endpoint = "whelk" + metadata_store = "whelk" + artifact = "octopus" + + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( + project=project, + location=location, + metadata_store=metadata_store, + artifact=artifact, + ) + actual = PipelineServiceClient.artifact_path( + project, location, metadata_store, artifact + ) + assert expected == actual + + +def test_parse_artifact_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "artifact": "mussel", + } + path = PipelineServiceClient.artifact_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_artifact_path(path) + assert expected == actual + + +def test_context_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + context = "abalone" + + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + actual = PipelineServiceClient.context_path( + project, location, metadata_store, context + ) + assert expected == actual + + +def test_parse_context_path(): + expected = { + "project": "squid", + "location": "clam", + "metadata_store": "whelk", + "context": "octopus", + } + path = PipelineServiceClient.context_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_context_path(path) + assert expected == actual + + +def test_custom_job_path(): + project = "oyster" + location = "nudibranch" + custom_job = "cuttlefish" + + expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format( + project=project, location=location, custom_job=custom_job, + ) + actual = PipelineServiceClient.custom_job_path(project, location, custom_job) + assert expected == actual + + +def test_parse_custom_job_path(): + expected = { + "project": "mussel", + "location": "winkle", + "custom_job": "nautilus", + } + path = PipelineServiceClient.custom_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_custom_job_path(path) + assert expected == actual + + +def test_endpoint_path(): + project = "scallop" + location = "abalone" + endpoint = "squid" expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, @@ -2205,9 +3620,9 @@ def test_endpoint_path(): def test_parse_endpoint_path(): expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", + "project": "clam", + "location": "whelk", + "endpoint": "octopus", } path = PipelineServiceClient.endpoint_path(**expected) @@ -2216,10 +3631,42 @@ def test_parse_endpoint_path(): assert expected == actual +def test_execution_path(): + project = "oyster" + location = "nudibranch" + metadata_store = "cuttlefish" + execution = "mussel" + + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( + project=project, + location=location, + metadata_store=metadata_store, + execution=execution, + ) + actual = PipelineServiceClient.execution_path( + project, location, metadata_store, execution + ) + assert expected == actual + + +def test_parse_execution_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "metadata_store": "scallop", + "execution": "abalone", + } + path = PipelineServiceClient.execution_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_execution_path(path) + assert expected == actual + + def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" + project = "squid" + location = "clam" + model = "whelk" expected = "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, @@ -2230,9 +3677,9 @@ def test_model_path(): def test_parse_model_path(): expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", + "project": "octopus", + "location": "oyster", + "model": "nudibranch", } path = PipelineServiceClient.model_path(**expected) @@ -2241,10 +3688,58 @@ def test_parse_model_path(): assert expected == actual +def test_network_path(): + project = "cuttlefish" + network = "mussel" + + expected = "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + actual = PipelineServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "winkle", + "network": "nautilus", + } + path = PipelineServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_network_path(path) + assert expected == actual + + +def test_pipeline_job_path(): + project = "scallop" + location = "abalone" + pipeline_job = "squid" + + expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format( + project=project, location=location, pipeline_job=pipeline_job, + ) + actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job) + assert expected == actual + + +def test_parse_pipeline_job_path(): + expected = { + "project": "clam", + "location": "whelk", + "pipeline_job": "octopus", + } + path = PipelineServiceClient.pipeline_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_pipeline_job_path(path) + assert expected == actual + + def test_training_pipeline_path(): - project = "squid" - location = "clam" - training_pipeline = "whelk" + project = "oyster" + location = "nudibranch" + training_pipeline = "cuttlefish" expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format( project=project, location=location, training_pipeline=training_pipeline, @@ -2257,9 +3752,9 @@ def test_training_pipeline_path(): def test_parse_training_pipeline_path(): expected = { - "project": "octopus", - "location": "oyster", - "training_pipeline": "nudibranch", + "project": "mussel", + "location": "winkle", + "training_pipeline": "nautilus", } path = PipelineServiceClient.training_pipeline_path(**expected) @@ -2269,7 +3764,7 @@ def test_parse_training_pipeline_path(): def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "scallop" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, @@ -2280,7 +3775,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "abalone", } path = PipelineServiceClient.common_billing_account_path(**expected) @@ -2290,7 +3785,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "squid" expected = "folders/{folder}".format(folder=folder,) actual = PipelineServiceClient.common_folder_path(folder) @@ -2299,7 +3794,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "clam", } path = PipelineServiceClient.common_folder_path(**expected) @@ -2309,7 +3804,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "whelk" expected = "organizations/{organization}".format(organization=organization,) actual = 
PipelineServiceClient.common_organization_path(organization) @@ -2318,7 +3813,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "octopus", } path = PipelineServiceClient.common_organization_path(**expected) @@ -2328,7 +3823,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "oyster" expected = "projects/{project}".format(project=project,) actual = PipelineServiceClient.common_project_path(project) @@ -2337,7 +3832,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "clam", + "project": "nudibranch", } path = PipelineServiceClient.common_project_path(**expected) @@ -2347,8 +3842,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "whelk" - location = "octopus" + project = "cuttlefish" + location = "mussel" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -2359,8 +3854,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "winkle", + "location": "nautilus", } path = PipelineServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py index 879a0a69d5..3daed56994 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py @@ -98,7 +98,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient,], + "client_class", [SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient,] ) def test_specialist_pool_service_client_from_service_account_info(client_class): creds = 
credentials.AnonymousCredentials() @@ -115,7 +115,7 @@ def test_specialist_pool_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - "client_class", [SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient,], + "client_class", [SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient,] ) def test_specialist_pool_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py new file mode 100644 index 0000000000..cfbde666ce --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py @@ -0,0 +1,8115 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import ( + TensorboardServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import ( + TensorboardServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import transports +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_experiment as gca_tensorboard_experiment, +) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import ( + tensorboard_time_series as gca_tensorboard_time_series, +) +from 
google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TensorboardServiceClient._get_default_mtls_endpoint(None) is None + assert ( + TensorboardServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + TensorboardServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + TensorboardServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + TensorboardServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + TensorboardServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [TensorboardServiceClient, TensorboardServiceAsyncClient,] +) +def test_tensorboard_service_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = 
client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [TensorboardServiceClient, TensorboardServiceAsyncClient,] +) +def test_tensorboard_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_tensorboard_service_client_get_transport_class(): + transport = TensorboardServiceClient.get_transport_class() + available_transports = [ + transports.TensorboardServiceGrpcTransport, + ] + assert transport in available_transports + + transport = TensorboardServiceClient.get_transport_class("grpc") + assert transport == transports.TensorboardServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), + ( + TensorboardServiceAsyncClient, + transports.TensorboardServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + TensorboardServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(TensorboardServiceClient), +) +@mock.patch.object( + TensorboardServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(TensorboardServiceAsyncClient), +) +def test_tensorboard_service_client_client_options( + client_class, transport_class, transport_name 
+): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(TensorboardServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TensorboardServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + TensorboardServiceClient, + transports.TensorboardServiceGrpcTransport, + "grpc", + "true", + ), + ( + TensorboardServiceAsyncClient, + transports.TensorboardServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + TensorboardServiceClient, + transports.TensorboardServiceGrpcTransport, + "grpc", + "false", + ), + ( + TensorboardServiceAsyncClient, + transports.TensorboardServiceGrpcAsyncIOTransport, + 
"grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + TensorboardServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(TensorboardServiceClient), +) +@mock.patch.object( + TensorboardServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(TensorboardServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_tensorboard_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), + ( + TensorboardServiceAsyncClient, + transports.TensorboardServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_tensorboard_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), + ( + TensorboardServiceAsyncClient, + transports.TensorboardServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_tensorboard_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_tensorboard_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = TensorboardServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_tensorboard( + transport: str = "grpc", request_type=tensorboard_service.CreateTensorboardRequest +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_tensorboard_from_dict(): + test_create_tensorboard(request_type=dict) + + +def test_create_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), "__call__" + ) as call: + client.create_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardRequest() + + +@pytest.mark.asyncio +async def test_create_tensorboard_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.CreateTensorboardRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_tensorboard_async_from_dict(): + await test_create_tensorboard_async(request_type=dict) + + +def test_create_tensorboard_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_tensorboard_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard( + parent="parent_value", + tensorboard=gca_tensorboard.Tensorboard(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].tensorboard == gca_tensorboard.Tensorboard(name="name_value") + + +def test_create_tensorboard_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_tensorboard( + tensorboard_service.CreateTensorboardRequest(), + parent="parent_value", + tensorboard=gca_tensorboard.Tensorboard(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tensorboard( + parent="parent_value", + tensorboard=gca_tensorboard.Tensorboard(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].tensorboard == gca_tensorboard.Tensorboard(name="name_value") + + +@pytest.mark.asyncio +async def test_create_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_tensorboard( + tensorboard_service.CreateTensorboardRequest(), + parent="parent_value", + tensorboard=gca_tensorboard.Tensorboard(name="name_value"), + ) + + +def test_get_tensorboard( + transport: str = "grpc", request_type=tensorboard_service.GetTensorboardRequest +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard.Tensorboard( + name="name_value", + display_name="display_name_value", + description="description_value", + blob_storage_path_prefix="blob_storage_path_prefix_value", + run_count=989, + etag="etag_value", + ) + + response = client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, tensorboard.Tensorboard) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.blob_storage_path_prefix == "blob_storage_path_prefix_value" + + assert response.run_count == 989 + + assert response.etag == "etag_value" + + +def test_get_tensorboard_from_dict(): + test_get_tensorboard(request_type=dict) + + +def test_get_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call: + client.get_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardRequest() + + +@pytest.mark.asyncio +async def test_get_tensorboard_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.GetTensorboardRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard.Tensorboard( + name="name_value", + display_name="display_name_value", + description="description_value", + blob_storage_path_prefix="blob_storage_path_prefix_value", + run_count=989, + etag="etag_value", + ) + ) + + response = await client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard.Tensorboard) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.blob_storage_path_prefix == "blob_storage_path_prefix_value" + + assert response.run_count == 989 + + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_get_tensorboard_async_from_dict(): + await test_get_tensorboard_async(request_type=dict) + + +def test_get_tensorboard_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call: + call.return_value = tensorboard.Tensorboard() + + client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard.Tensorboard() + ) + + await client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_tensorboard_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard.Tensorboard() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_tensorboard_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_tensorboard( + tensorboard_service.GetTensorboardRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard.Tensorboard() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard.Tensorboard() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_tensorboard( + tensorboard_service.GetTensorboardRequest(), name="name_value", + ) + + +def test_update_tensorboard( + transport: str = "grpc", request_type=tensorboard_service.UpdateTensorboardRequest +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_tensorboard_from_dict(): + test_update_tensorboard(request_type=dict) + + +def test_update_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), "__call__" + ) as call: + client.update_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + + +@pytest.mark.asyncio +async def test_update_tensorboard_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.UpdateTensorboardRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_tensorboard_async_from_dict(): + await test_update_tensorboard_async(request_type=dict) + + +def test_update_tensorboard_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRequest() + request.tensorboard.name = "tensorboard.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "tensorboard.name=tensorboard.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRequest() + request.tensorboard.name = "tensorboard.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "tensorboard.name=tensorboard.name/value",) in kw[ + "metadata" + ] + + +def test_update_tensorboard_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard( + tensorboard=gca_tensorboard.Tensorboard(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].tensorboard == gca_tensorboard.Tensorboard(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_tensorboard_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_tensorboard( + tensorboard_service.UpdateTensorboardRequest(), + tensorboard=gca_tensorboard.Tensorboard(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard( + tensorboard=gca_tensorboard.Tensorboard(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].tensorboard == gca_tensorboard.Tensorboard(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_tensorboard( + tensorboard_service.UpdateTensorboardRequest(), + tensorboard=gca_tensorboard.Tensorboard(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_list_tensorboards( + transport: str = "grpc", request_type=tensorboard_service.ListTensorboardsRequest +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListTensorboardsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_tensorboards_from_dict(): + test_list_tensorboards(request_type=dict) + + +def test_list_tensorboards_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboards), "__call__" + ) as call: + client.list_tensorboards() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardsRequest() + + +@pytest.mark.asyncio +async def test_list_tensorboards_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.ListTensorboardsRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_tensorboards_async_from_dict(): + await test_list_tensorboards_async(request_type=dict) + + +def test_list_tensorboards_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.ListTensorboardsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), "__call__" + ) as call: + call.return_value = tensorboard_service.ListTensorboardsResponse() + + client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_tensorboards_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardsResponse() + ) + + await client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_tensorboards_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboards), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboards(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_tensorboards_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tensorboards( + tensorboard_service.ListTensorboardsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_tensorboards_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tensorboards(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_tensorboards_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_tensorboards( + tensorboard_service.ListTensorboardsRequest(), parent="parent_value", + ) + + +def test_list_tensorboards_pager(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + next_page_token="abc", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[], next_page_token="def", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[tensorboard.Tensorboard(),], next_page_token="ghi", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[tensorboard.Tensorboard(), tensorboard.Tensorboard(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_tensorboards(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, tensorboard.Tensorboard) for i in results) + + +def test_list_tensorboards_pages(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboards), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + next_page_token="abc", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[], next_page_token="def", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[tensorboard.Tensorboard(),], next_page_token="ghi", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[tensorboard.Tensorboard(), tensorboard.Tensorboard(),], + ), + RuntimeError, + ) + pages = list(client.list_tensorboards(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_tensorboards_async_pager(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + next_page_token="abc", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[], next_page_token="def", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[tensorboard.Tensorboard(),], next_page_token="ghi", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[tensorboard.Tensorboard(), tensorboard.Tensorboard(),], + ), + RuntimeError, + ) + async_pager = await client.list_tensorboards(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, tensorboard.Tensorboard) for i in responses) + + +@pytest.mark.asyncio +async def test_list_tensorboards_async_pages(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + next_page_token="abc", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[], next_page_token="def", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[tensorboard.Tensorboard(),], next_page_token="ghi", + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[tensorboard.Tensorboard(), tensorboard.Tensorboard(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboards(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_tensorboard( + transport: str = "grpc", request_type=tensorboard_service.DeleteTensorboardRequest +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_tensorboard_from_dict(): + test_delete_tensorboard(request_type=dict) + + +def test_delete_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), "__call__" + ) as call: + client.delete_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + + +@pytest.mark.asyncio +async def test_delete_tensorboard_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.DeleteTensorboardRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_async_from_dict(): + await test_delete_tensorboard_async(request_type=dict) + + +def test_delete_tensorboard_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_tensorboard_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_tensorboard_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_tensorboard( + tensorboard_service.DeleteTensorboardRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_tensorboard( + tensorboard_service.DeleteTensorboardRequest(), name="name_value", + ) + + +def test_create_tensorboard_experiment( + transport: str = "grpc", + request_type=tensorboard_service.CreateTensorboardExperimentRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + source="source_value", + ) + + response = client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + assert response.source == "source_value" + + +def test_create_tensorboard_experiment_from_dict(): + test_create_tensorboard_experiment(request_type=dict) + + +def test_create_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), "__call__" + ) as call: + client.create_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() + + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.CreateTensorboardExperimentRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_experiment.TensorboardExperiment( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + source="source_value", + ) + ) + + response = await client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + assert response.source == "source_value" + + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_async_from_dict(): + await test_create_tensorboard_experiment_async(request_type=dict) + + +def test_create_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardExperimentRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), "__call__" + ) as call: + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + + client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardExperimentRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_experiment.TensorboardExperiment() + ) + + await client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_tensorboard_experiment_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_tensorboard_experiment( + parent="parent_value", + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ), + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[ + 0 + ].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ) + + assert args[0].tensorboard_experiment_id == "tensorboard_experiment_id_value" + + +def test_create_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_tensorboard_experiment( + tensorboard_service.CreateTensorboardExperimentRequest(), + parent="parent_value", + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ), + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_experiment.TensorboardExperiment() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_tensorboard_experiment( + parent="parent_value", + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ), + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[ + 0 + ].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ) + + assert args[0].tensorboard_experiment_id == "tensorboard_experiment_id_value" + + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_tensorboard_experiment( + tensorboard_service.CreateTensorboardExperimentRequest(), + parent="parent_value", + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ), + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + +def test_get_tensorboard_experiment( + transport: str = "grpc", + request_type=tensorboard_service.GetTensorboardExperimentRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = tensorboard_experiment.TensorboardExperiment( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + source="source_value", + ) + + response = client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, tensorboard_experiment.TensorboardExperiment) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + assert response.source == "source_value" + + +def test_get_tensorboard_experiment_from_dict(): + test_get_tensorboard_experiment(request_type=dict) + + +def test_get_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard_experiment), "__call__" + ) as call: + client.get_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() + + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.GetTensorboardExperimentRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_experiment.TensorboardExperiment( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + source="source_value", + ) + ) + + response = await client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_experiment.TensorboardExperiment) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + assert response.source == "source_value" + + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_async_from_dict(): + await test_get_tensorboard_experiment_async(request_type=dict) + + +def test_get_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardExperimentRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), "__call__" + ) as call: + call.return_value = tensorboard_experiment.TensorboardExperiment() + + client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardExperimentRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard_experiment), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_experiment.TensorboardExperiment() + ) + + await client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_tensorboard_experiment_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_experiment.TensorboardExperiment() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard_experiment(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_tensorboard_experiment( + tensorboard_service.GetTensorboardExperimentRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_experiment.TensorboardExperiment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_experiment.TensorboardExperiment() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard_experiment(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_tensorboard_experiment( + tensorboard_service.GetTensorboardExperimentRequest(), name="name_value", + ) + + +def test_update_tensorboard_experiment( + transport: str = "grpc", + request_type=tensorboard_service.UpdateTensorboardExperimentRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + source="source_value", + ) + + response = client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + assert response.source == "source_value" + + +def test_update_tensorboard_experiment_from_dict(): + test_update_tensorboard_experiment(request_type=dict) + + +def test_update_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_experiment), "__call__" + ) as call: + client.update_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.UpdateTensorboardExperimentRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_experiment.TensorboardExperiment( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + source="source_value", + ) + ) + + response = await client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + assert response.source == "source_value" + + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_async_from_dict(): + await test_update_tensorboard_experiment_async(request_type=dict) + + +def test_update_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardExperimentRequest() + request.tensorboard_experiment.name = "tensorboard_experiment.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), "__call__" + ) as call: + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + + client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_experiment.name=tensorboard_experiment.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.UpdateTensorboardExperimentRequest() + request.tensorboard_experiment.name = "tensorboard_experiment.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_experiment.TensorboardExperiment() + ) + + await client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_experiment.name=tensorboard_experiment.name/value", + ) in kw["metadata"] + + +def test_update_tensorboard_experiment_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard_experiment( + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[ + 0 + ].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_tensorboard_experiment( + tensorboard_service.UpdateTensorboardExperimentRequest(), + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_experiment.TensorboardExperiment() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard_experiment( + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[ + 0 + ].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_tensorboard_experiment( + tensorboard_service.UpdateTensorboardExperimentRequest(), + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_list_tensorboard_experiments( + transport: str = "grpc", + request_type=tensorboard_service.ListTensorboardExperimentsRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, pagers.ListTensorboardExperimentsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_tensorboard_experiments_from_dict(): + test_list_tensorboard_experiments(request_type=dict) + + +def test_list_tensorboard_experiments_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), "__call__" + ) as call: + client.list_tensorboard_experiments() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() + + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.ListTensorboardExperimentsRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardExperimentsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardExperimentsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_async_from_dict(): + await test_list_tensorboard_experiments_async(request_type=dict) + + +def test_list_tensorboard_experiments_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardExperimentsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), "__call__" + ) as call: + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() + + client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardExperimentsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_experiments), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardExperimentsResponse() + ) + + await client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_tensorboard_experiments_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_experiments(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_tensorboard_experiments_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_tensorboard_experiments(
+            tensorboard_service.ListTensorboardExperimentsRequest(),
+            parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_flattened_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tensorboard_experiments), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = tensorboard_service.ListTensorboardExperimentsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            tensorboard_service.ListTensorboardExperimentsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_tensorboard_experiments(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_flattened_error_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_tensorboard_experiments(
+            tensorboard_service.ListTensorboardExperimentsRequest(),
+            parent="parent_value",
+        )
+
+
+def test_list_tensorboard_experiments_pager():
+    # FIX: instantiate AnonymousCredentials() — the original passed the class
+    # object itself, unlike every other test in this module.
+    client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tensorboard_experiments), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token="abc",
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[], next_page_token="def",
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token="ghi",
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_tensorboard_experiments(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(
+            isinstance(i, tensorboard_experiment.TensorboardExperiment) for i in results
+        )
+
+
+def test_list_tensorboard_experiments_pages():
+    # FIX: instantiate AnonymousCredentials() — the original passed the class
+    # object itself, unlike every other test in this module.
+    client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tensorboard_experiments), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token="abc",
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[], next_page_token="def",
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token="ghi",
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_tensorboard_experiments(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_async_pager():
+    client = TensorboardServiceAsyncClient(
+        # FIX: instantiate AnonymousCredentials() — the original passed the
+        # class object itself, unlike every other test in this module.
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tensorboard_experiments),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token="abc",
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[], next_page_token="def",
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token="ghi",
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_tensorboard_experiments(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(
+            isinstance(i, tensorboard_experiment.TensorboardExperiment)
+            for i in responses
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_async_pages():
+    client = TensorboardServiceAsyncClient(
+        # FIX: instantiate AnonymousCredentials() — the original passed the
+        # class object itself, unlike every other test in this module.
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tensorboard_experiments),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token="abc", + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[], next_page_token="def", + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token="ghi", + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in ( + await client.list_tensorboard_experiments(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_tensorboard_experiment( + transport: str = "grpc", + request_type=tensorboard_service.DeleteTensorboardExperimentRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_tensorboard_experiment_from_dict(): + test_delete_tensorboard_experiment(request_type=dict) + + +def test_delete_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), "__call__" + ) as call: + client.delete_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.DeleteTensorboardExperimentRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_async_from_dict(): + await test_delete_tensorboard_experiment_async(request_type=dict) + + +def test_delete_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardExperimentRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardExperimentRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_tensorboard_experiment_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard_experiment(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_tensorboard_experiment( + tensorboard_service.DeleteTensorboardExperimentRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard_experiment(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_tensorboard_experiment( + tensorboard_service.DeleteTensorboardExperimentRequest(), name="name_value", + ) + + +def test_create_tensorboard_run( + transport: str = "grpc", + request_type=tensorboard_service.CreateTensorboardRunRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + + response = client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardRunRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gca_tensorboard_run.TensorboardRun) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + +def test_create_tensorboard_run_from_dict(): + test_create_tensorboard_run(request_type=dict) + + +def test_create_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), "__call__" + ) as call: + client.create_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardRunRequest() + + +@pytest.mark.asyncio +async def test_create_tensorboard_run_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.CreateTensorboardRunRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_run.TensorboardRun( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + ) + + response = await client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_run.TensorboardRun) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_create_tensorboard_run_async_from_dict(): + await test_create_tensorboard_run_async(request_type=dict) + + +def test_create_tensorboard_run_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRunRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), "__call__" + ) as call: + call.return_value = gca_tensorboard_run.TensorboardRun() + + client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRunRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_run), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_run.TensorboardRun() + ) + + await client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_tensorboard_run_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard_run( + parent="parent_value", + tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"), + tensorboard_run_id="tensorboard_run_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun( + name="name_value" + ) + + assert args[0].tensorboard_run_id == "tensorboard_run_id_value" + + +def test_create_tensorboard_run_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_tensorboard_run( + tensorboard_service.CreateTensorboardRunRequest(), + parent="parent_value", + tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"), + tensorboard_run_id="tensorboard_run_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_run.TensorboardRun() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tensorboard_run( + parent="parent_value", + tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"), + tensorboard_run_id="tensorboard_run_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun( + name="name_value" + ) + + assert args[0].tensorboard_run_id == "tensorboard_run_id_value" + + +@pytest.mark.asyncio +async def test_create_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_tensorboard_run( + tensorboard_service.CreateTensorboardRunRequest(), + parent="parent_value", + tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"), + tensorboard_run_id="tensorboard_run_id_value", + ) + + +def test_get_tensorboard_run( + transport: str = "grpc", request_type=tensorboard_service.GetTensorboardRunRequest +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_run.TensorboardRun( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + + response = client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardRunRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, tensorboard_run.TensorboardRun) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + +def test_get_tensorboard_run_from_dict(): + test_get_tensorboard_run(request_type=dict) + + +def test_get_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), "__call__" + ) as call: + client.get_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardRunRequest() + + +@pytest.mark.asyncio +async def test_get_tensorboard_run_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.GetTensorboardRunRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_run.TensorboardRun( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + ) + + response = await client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_run.TensorboardRun) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_get_tensorboard_run_async_from_dict(): + await test_get_tensorboard_run_async(request_type=dict) + + +def test_get_tensorboard_run_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRunRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), "__call__" + ) as call: + call.return_value = tensorboard_run.TensorboardRun() + + client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRunRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard_run), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_run.TensorboardRun() + ) + + await client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_tensorboard_run_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_run.TensorboardRun() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard_run(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_tensorboard_run_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_tensorboard_run( + tensorboard_service.GetTensorboardRunRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_run.TensorboardRun() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_run.TensorboardRun() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard_run(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_tensorboard_run( + tensorboard_service.GetTensorboardRunRequest(), name="name_value", + ) + + +def test_update_tensorboard_run( + transport: str = "grpc", + request_type=tensorboard_service.UpdateTensorboardRunRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_tensorboard_run.TensorboardRun( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + + response = client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gca_tensorboard_run.TensorboardRun) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + +def test_update_tensorboard_run_from_dict(): + test_update_tensorboard_run(request_type=dict) + + +def test_update_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), "__call__" + ) as call: + client.update_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() + + +@pytest.mark.asyncio +async def test_update_tensorboard_run_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.UpdateTensorboardRunRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_run.TensorboardRun( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + ) + + response = await client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_run.TensorboardRun) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_update_tensorboard_run_async_from_dict(): + await test_update_tensorboard_run_async(request_type=dict) + + +def test_update_tensorboard_run_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRunRequest() + request.tensorboard_run.name = "tensorboard_run.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), "__call__" + ) as call: + call.return_value = gca_tensorboard_run.TensorboardRun() + + client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_run.name=tensorboard_run.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRunRequest() + request.tensorboard_run.name = "tensorboard_run.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_run.TensorboardRun() + ) + + await client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_run.name=tensorboard_run.name/value", + ) in kw["metadata"] + + +def test_update_tensorboard_run_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_tensorboard_run( + tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun( + name="name_value" + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_tensorboard_run_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_tensorboard_run( + tensorboard_service.UpdateTensorboardRunRequest(), + tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_run.TensorboardRun() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard_run( + tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun( + name="name_value" + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_tensorboard_run( + tensorboard_service.UpdateTensorboardRunRequest(), + tensorboard_run=gca_tensorboard_run.TensorboardRun(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_list_tensorboard_runs( + transport: str = "grpc", request_type=tensorboard_service.ListTensorboardRunsRequest +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardRunsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardRunsRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, pagers.ListTensorboardRunsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_tensorboard_runs_from_dict(): + test_list_tensorboard_runs(request_type=dict) + + +def test_list_tensorboard_runs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), "__call__" + ) as call: + client.list_tensorboard_runs() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardRunsRequest() + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.ListTensorboardRunsRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardRunsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardRunsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardRunsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_async_from_dict(): + await test_list_tensorboard_runs_async(request_type=dict) + + +def test_list_tensorboard_runs_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardRunsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), "__call__" + ) as call: + call.return_value = tensorboard_service.ListTensorboardRunsResponse() + + client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardRunsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_runs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardRunsResponse() + ) + + await client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_tensorboard_runs_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardRunsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_runs(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_tensorboard_runs_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_tensorboard_runs( + tensorboard_service.ListTensorboardRunsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardRunsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardRunsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tensorboard_runs(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_tensorboard_runs( + tensorboard_service.ListTensorboardRunsRequest(), parent="parent_value", + ) + + +def test_list_tensorboard_runs_pager(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + next_page_token="abc", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[], next_page_token="def", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[tensorboard_run.TensorboardRun(),], + next_page_token="ghi", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_tensorboard_runs(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, tensorboard_run.TensorboardRun) for i in results) + + +def test_list_tensorboard_runs_pages(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + next_page_token="abc", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[], next_page_token="def", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[tensorboard_run.TensorboardRun(),], + next_page_token="ghi", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + ), + RuntimeError, + ) + pages = list(client.list_tensorboard_runs(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_async_pager(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + next_page_token="abc", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[], next_page_token="def", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[tensorboard_run.TensorboardRun(),], + next_page_token="ghi", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_tensorboard_runs(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, tensorboard_run.TensorboardRun) for i in responses) + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_async_pages(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + next_page_token="abc", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[], next_page_token="def", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[tensorboard_run.TensorboardRun(),], + next_page_token="ghi", + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboard_runs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_tensorboard_run( + transport: str = "grpc", + request_type=tensorboard_service.DeleteTensorboardRunRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_tensorboard_run_from_dict(): + test_delete_tensorboard_run(request_type=dict) + + +def test_delete_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + client.delete_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.DeleteTensorboardRunRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_async_from_dict(): + await test_delete_tensorboard_run_async(request_type=dict) + + +def test_delete_tensorboard_run_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRunRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRunRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_tensorboard_run_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard_run(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_tensorboard_run_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_tensorboard_run( + tensorboard_service.DeleteTensorboardRunRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard_run(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_tensorboard_run( + tensorboard_service.DeleteTensorboardRunRequest(), name="name_value", + ) + + +def test_create_tensorboard_time_series( + transport: str = "grpc", + request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value", + display_name="display_name_value", + description="description_value", + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag="etag_value", + plugin_name="plugin_name_value", + plugin_data=b"plugin_data_blob", + ) + + response = client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.value_type + == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + ) + + assert response.etag == "etag_value" + + assert response.plugin_name == "plugin_name_value" + + assert response.plugin_data == b"plugin_data_blob" + + +def test_create_tensorboard_time_series_from_dict(): + test_create_tensorboard_time_series(request_type=dict) + + +def test_create_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_time_series), "__call__" + ) as call: + client.create_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value", + display_name="display_name_value", + description="description_value", + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag="etag_value", + plugin_name="plugin_name_value", + plugin_data=b"plugin_data_blob", + ) + ) + + response = await client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.value_type + == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + ) + + assert response.etag == "etag_value" + + assert response.plugin_name == "plugin_name_value" + + assert response.plugin_data == b"plugin_data_blob" + + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_async_from_dict(): + await test_create_tensorboard_time_series_async(request_type=dict) + + +def test_create_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardTimeSeriesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), "__call__" + ) as call: + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + + client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.CreateTensorboardTimeSeriesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_time_series.TensorboardTimeSeries() + ) + + await client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_tensorboard_time_series_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard_time_series( + parent="parent_value", + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[ + 0 + ].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ) + + +def test_create_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_tensorboard_time_series( + tensorboard_service.CreateTensorboardTimeSeriesRequest(), + parent="parent_value", + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ), + ) + + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_time_series.TensorboardTimeSeries() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tensorboard_time_series( + parent="parent_value", + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[ + 0 + ].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ) + + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_tensorboard_time_series( + tensorboard_service.CreateTensorboardTimeSeriesRequest(), + parent="parent_value", + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ), + ) + + +def test_get_tensorboard_time_series( + transport: str = "grpc", + request_type=tensorboard_service.GetTensorboardTimeSeriesRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_time_series.TensorboardTimeSeries( + name="name_value", + display_name="display_name_value", + description="description_value", + value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag="etag_value", + plugin_name="plugin_name_value", + plugin_data=b"plugin_data_blob", + ) + + response = client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.value_type + == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + ) + + assert response.etag == "etag_value" + + assert response.plugin_name == "plugin_name_value" + + assert response.plugin_data == b"plugin_data_blob" + + +def test_get_tensorboard_time_series_from_dict(): + test_get_tensorboard_time_series(request_type=dict) + + +def test_get_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), "__call__" + ) as call: + client.get_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.GetTensorboardTimeSeriesRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_time_series.TensorboardTimeSeries( + name="name_value", + display_name="display_name_value", + description="description_value", + value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag="etag_value", + plugin_name="plugin_name_value", + plugin_data=b"plugin_data_blob", + ) + ) + + response = await client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.value_type + == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + ) + + assert response.etag == "etag_value" + + assert response.plugin_name == "plugin_name_value" + + assert response.plugin_data == b"plugin_data_blob" + + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_async_from_dict(): + await test_get_tensorboard_time_series_async(request_type=dict) + + +def test_get_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.GetTensorboardTimeSeriesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), "__call__" + ) as call: + call.return_value = tensorboard_time_series.TensorboardTimeSeries() + + client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardTimeSeriesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_time_series.TensorboardTimeSeries() + ) + + await client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_tensorboard_time_series_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_time_series.TensorboardTimeSeries() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard_time_series(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_tensorboard_time_series( + tensorboard_service.GetTensorboardTimeSeriesRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_time_series.TensorboardTimeSeries() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_time_series.TensorboardTimeSeries() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard_time_series(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_tensorboard_time_series( + tensorboard_service.GetTensorboardTimeSeriesRequest(), name="name_value", + ) + + +def test_update_tensorboard_time_series( + transport: str = "grpc", + request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value", + display_name="display_name_value", + description="description_value", + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag="etag_value", + plugin_name="plugin_name_value", + plugin_data=b"plugin_data_blob", + ) + + response = client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.value_type + == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + ) + + assert response.etag == "etag_value" + + assert response.plugin_name == "plugin_name_value" + + assert response.plugin_data == b"plugin_data_blob" + + +def test_update_tensorboard_time_series_from_dict(): + test_update_tensorboard_time_series(request_type=dict) + + +def test_update_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), "__call__" + ) as call: + client.update_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value", + display_name="display_name_value", + description="description_value", + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag="etag_value", + plugin_name="plugin_name_value", + plugin_data=b"plugin_data_blob", + ) + ) + + response = await client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert ( + response.value_type + == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + ) + + assert response.etag == "etag_value" + + assert response.plugin_name == "plugin_name_value" + + assert response.plugin_data == b"plugin_data_blob" + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_async_from_dict(): + await test_update_tensorboard_time_series_async(request_type=dict) + + +def test_update_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() + request.tensorboard_time_series.name = "tensorboard_time_series.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_time_series), "__call__" + ) as call: + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + + client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_time_series.name=tensorboard_time_series.name/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() + request.tensorboard_time_series.name = "tensorboard_time_series.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_time_series.TensorboardTimeSeries() + ) + + await client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_time_series.name=tensorboard_time_series.name/value", + ) in kw["metadata"] + + +def test_update_tensorboard_time_series_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard_time_series( + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[ + 0 + ].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_tensorboard_time_series( + tensorboard_service.UpdateTensorboardTimeSeriesRequest(), + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tensorboard_time_series.TensorboardTimeSeries() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard_time_series( + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[ + 0 + ].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_tensorboard_time_series( + tensorboard_service.UpdateTensorboardTimeSeriesRequest(), + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries( + name="name_value" + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_list_tensorboard_time_series( + transport: str = "grpc", + request_type=tensorboard_service.ListTensorboardTimeSeriesRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListTensorboardTimeSeriesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_tensorboard_time_series_from_dict(): + test_list_tensorboard_time_series(request_type=dict) + + +def test_list_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), "__call__" + ) as call: + client.list_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.ListTensorboardTimeSeriesRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardTimeSeriesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardTimeSeriesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_async_from_dict(): + await test_list_tensorboard_time_series_async(request_type=dict) + + +def test_list_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardTimeSeriesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), "__call__" + ) as call: + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + + client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardTimeSeriesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardTimeSeriesResponse() + ) + + await client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_tensorboard_time_series_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_time_series(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tensorboard_time_series( + tensorboard_service.ListTensorboardTimeSeriesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ListTensorboardTimeSeriesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tensorboard_time_series(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        await client.list_tensorboard_time_series(
+            tensorboard_service.ListTensorboardTimeSeriesRequest(),
+            parent="parent_value",
+        )
+
+
+def test_list_tensorboard_time_series_pager():
+    client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tensorboard_time_series), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+                next_page_token="abc",
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[], next_page_token="def",
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+                next_page_token="ghi",
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_tensorboard_time_series(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(
+            isinstance(i, tensorboard_time_series.TensorboardTimeSeries)
+            for i in results
+        )
+
+
+def test_list_tensorboard_time_series_pages():
+    client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tensorboard_time_series), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+                next_page_token="abc",
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[], next_page_token="def",
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+                next_page_token="ghi",
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_tensorboard_time_series(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_time_series_async_pager():
+    client = TensorboardServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tensorboard_time_series),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+                next_page_token="abc",
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[], next_page_token="def",
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+                next_page_token="ghi",
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_tensorboard_time_series(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(
+            isinstance(i, tensorboard_time_series.TensorboardTimeSeries)
+            for i in responses
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_time_series_async_pages():
+    client = TensorboardServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tensorboard_time_series),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token="abc", + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[], next_page_token="def", + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token="ghi", + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in ( + await client.list_tensorboard_time_series(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_tensorboard_time_series( + transport: str = "grpc", + request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_tensorboard_time_series_from_dict(): + test_delete_tensorboard_time_series(request_type=dict) + + +def test_delete_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), "__call__" + ) as call: + client.delete_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_async_from_dict(): + await test_delete_tensorboard_time_series_async(request_type=dict) + + +def test_delete_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_tensorboard_time_series_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard_time_series(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_tensorboard_time_series( + tensorboard_service.DeleteTensorboardTimeSeriesRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard_time_series(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
    with pytest.raises(ValueError):
        await client.delete_tensorboard_time_series(
            tensorboard_service.DeleteTensorboardTimeSeriesRequest(), name="name_value",
        )


def test_read_tensorboard_time_series_data(
    transport: str = "grpc",
    request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest,
):
    """ReadTensorboardTimeSeriesData sends the request and returns the response type."""
    client = TensorboardServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.read_tensorboard_time_series_data), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse()

        response = client.read_tensorboard_time_series_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest()

    # Establish that the response is the type that we expect.

    assert isinstance(
        response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse
    )


def test_read_tensorboard_time_series_data_from_dict():
    """Dict-typed requests are accepted interchangeably with proto requests."""
    test_read_tensorboard_time_series_data(request_type=dict)


def test_read_tensorboard_time_series_data_empty_call():
    """Calling with no request and no flattened fields still issues an RPC."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TensorboardServiceClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), "__call__" + ) as call: + client.read_tensorboard_time_series_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + ) + + response = await client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance( + response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse + ) + + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_async_from_dict(): + await test_read_tensorboard_time_series_data_async(request_type=dict) + + +def test_read_tensorboard_time_series_data_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + request.tensorboard_time_series = "tensorboard_time_series/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), "__call__" + ) as call: + call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + + client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_time_series=tensorboard_time_series/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + request.tensorboard_time_series = "tensorboard_time_series/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + ) + + await client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_time_series=tensorboard_time_series/value", + ) in kw["metadata"] + + +def test_read_tensorboard_time_series_data_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_tensorboard_time_series_data( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].tensorboard_time_series == "tensorboard_time_series_value" + + +def test_read_tensorboard_time_series_data_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_tensorboard_time_series_data( + tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), + tensorboard_time_series="tensorboard_time_series_value", + ) + + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_tensorboard_time_series_data( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].tensorboard_time_series == "tensorboard_time_series_value" + + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_tensorboard_time_series_data( + tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), + tensorboard_time_series="tensorboard_time_series_value", + ) + + +def test_read_tensorboard_blob_data( + transport: str = "grpc", + request_type=tensorboard_service.ReadTensorboardBlobDataRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iter( + [tensorboard_service.ReadTensorboardBlobDataResponse()] + ) + + response = client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) + + +def test_read_tensorboard_blob_data_from_dict(): + test_read_tensorboard_blob_data(request_type=dict) + + +def test_read_tensorboard_blob_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), "__call__" + ) as call: + client.read_tensorboard_blob_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() + + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.ReadTensorboardBlobDataRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()] + ) + + response = await client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) + + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_async_from_dict(): + await test_read_tensorboard_blob_data_async(request_type=dict) + + +def test_read_tensorboard_blob_data_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardBlobDataRequest() + request.time_series = "time_series/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), "__call__" + ) as call: + call.return_value = iter( + [tensorboard_service.ReadTensorboardBlobDataResponse()] + ) + + client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "time_series=time_series/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardBlobDataRequest() + request.time_series = "time_series/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), "__call__" + ) as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()] + ) + + await client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "time_series=time_series/value",) in kw["metadata"] + + +def test_read_tensorboard_blob_data_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter( + [tensorboard_service.ReadTensorboardBlobDataResponse()] + ) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.read_tensorboard_blob_data(time_series="time_series_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].time_series == "time_series_value" + + +def test_read_tensorboard_blob_data_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_tensorboard_blob_data( + tensorboard_service.ReadTensorboardBlobDataRequest(), + time_series="time_series_value", + ) + + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter( + [tensorboard_service.ReadTensorboardBlobDataResponse()] + ) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_tensorboard_blob_data( + time_series="time_series_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].time_series == "time_series_value" + + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
    with pytest.raises(ValueError):
        await client.read_tensorboard_blob_data(
            tensorboard_service.ReadTensorboardBlobDataRequest(),
            time_series="time_series_value",
        )


def test_write_tensorboard_run_data(
    transport: str = "grpc",
    request_type=tensorboard_service.WriteTensorboardRunDataRequest,
):
    """WriteTensorboardRunData sends the request and returns the response type."""
    client = TensorboardServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.write_tensorboard_run_data), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = tensorboard_service.WriteTensorboardRunDataResponse()

        response = client.write_tensorboard_run_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()

    # Establish that the response is the type that we expect.

    assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse)


def test_write_tensorboard_run_data_from_dict():
    """Dict-typed requests are accepted interchangeably with proto requests."""
    test_write_tensorboard_run_data(request_type=dict)


def test_write_tensorboard_run_data_empty_call():
    """Calling with no request and no flattened fields still issues an RPC."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TensorboardServiceClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.write_tensorboard_run_data), "__call__" + ) as call: + client.write_tensorboard_run_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() + + +@pytest.mark.asyncio +async def test_write_tensorboard_run_data_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.WriteTensorboardRunDataRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.WriteTensorboardRunDataResponse() + ) + + response = await client.write_tensorboard_run_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse) + + +@pytest.mark.asyncio +async def test_write_tensorboard_run_data_async_from_dict(): + await test_write_tensorboard_run_data_async(request_type=dict) + + +def test_write_tensorboard_run_data_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.WriteTensorboardRunDataRequest() + request.tensorboard_run = "tensorboard_run/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), "__call__" + ) as call: + call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() + + client.write_tensorboard_run_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "tensorboard_run=tensorboard_run/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_write_tensorboard_run_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.WriteTensorboardRunDataRequest() + request.tensorboard_run = "tensorboard_run/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.WriteTensorboardRunDataResponse() + ) + + await client.write_tensorboard_run_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "tensorboard_run=tensorboard_run/value",) in kw[ + "metadata" + ] + + +def test_write_tensorboard_run_data_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.write_tensorboard_run_data( + tensorboard_run="tensorboard_run_value", + time_series_data=[ + tensorboard_data.TimeSeriesData( + tensorboard_time_series_id="tensorboard_time_series_id_value" + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].tensorboard_run == "tensorboard_run_value" + + assert args[0].time_series_data == [ + tensorboard_data.TimeSeriesData( + tensorboard_time_series_id="tensorboard_time_series_id_value" + ) + ] + + +def test_write_tensorboard_run_data_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.write_tensorboard_run_data( + tensorboard_service.WriteTensorboardRunDataRequest(), + tensorboard_run="tensorboard_run_value", + time_series_data=[ + tensorboard_data.TimeSeriesData( + tensorboard_time_series_id="tensorboard_time_series_id_value" + ) + ], + ) + + +@pytest.mark.asyncio +async def test_write_tensorboard_run_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.WriteTensorboardRunDataResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.write_tensorboard_run_data( + tensorboard_run="tensorboard_run_value", + time_series_data=[ + tensorboard_data.TimeSeriesData( + tensorboard_time_series_id="tensorboard_time_series_id_value" + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].tensorboard_run == "tensorboard_run_value" + + assert args[0].time_series_data == [ + tensorboard_data.TimeSeriesData( + tensorboard_time_series_id="tensorboard_time_series_id_value" + ) + ] + + +@pytest.mark.asyncio +async def test_write_tensorboard_run_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.write_tensorboard_run_data( + tensorboard_service.WriteTensorboardRunDataRequest(), + tensorboard_run="tensorboard_run_value", + time_series_data=[ + tensorboard_data.TimeSeriesData( + tensorboard_time_series_id="tensorboard_time_series_id_value" + ) + ], + ) + + +def test_export_tensorboard_time_series_data( + transport: str = "grpc", + request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest, +): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + next_page_token="next_page_token_value", + ) + + response = client.export_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_export_tensorboard_time_series_data_from_dict(): + test_export_tensorboard_time_series_data(request_type=dict) + + +def test_export_tensorboard_time_series_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), "__call__" + ) as call: + client.export_tensorboard_time_series_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() + + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest, +): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.export_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_async_from_dict(): + await test_export_tensorboard_time_series_data_async(request_type=dict) + + +def test_export_tensorboard_time_series_data_field_headers(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest() + request.tensorboard_time_series = "tensorboard_time_series/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), "__call__" + ) as call: + call.return_value = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse() + ) + + client.export_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_time_series=tensorboard_time_series/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest() + request.tensorboard_time_series = "tensorboard_time_series/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse() + ) + + await client.export_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_time_series=tensorboard_time_series/value", + ) in kw["metadata"] + + +def test_export_tensorboard_time_series_data_flattened(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse() + ) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_tensorboard_time_series_data( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].tensorboard_time_series == "tensorboard_time_series_value" + + +def test_export_tensorboard_time_series_data_flattened_error(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.export_tensorboard_time_series_data( + tensorboard_service.ExportTensorboardTimeSeriesDataRequest(), + tensorboard_time_series="tensorboard_time_series_value", + ) + + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_tensorboard_time_series_data( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].tensorboard_time_series == "tensorboard_time_series_value" + + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.export_tensorboard_time_series_data( + tensorboard_service.ExportTensorboardTimeSeriesDataRequest(), + tensorboard_time_series="tensorboard_time_series_value", + ) + + +def test_export_tensorboard_time_series_data_pager(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token="abc", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[], next_page_token="def", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[tensorboard_data.TimeSeriesDataPoint(),], + next_page_token="ghi", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_time_series", ""),) + ), + ) + pager = client.export_tensorboard_time_series_data(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint) for i in results) + + +def test_export_tensorboard_time_series_data_pages(): + client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token="abc", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[], next_page_token="def", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[tensorboard_data.TimeSeriesDataPoint(),], + next_page_token="ghi", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + ), + RuntimeError, + ) + pages = list(client.export_tensorboard_time_series_data(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_async_pager(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token="abc", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[], next_page_token="def", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[tensorboard_data.TimeSeriesDataPoint(),], + next_page_token="ghi", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + ), + RuntimeError, + ) + async_pager = await client.export_tensorboard_time_series_data(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, tensorboard_data.TimeSeriesDataPoint) for i in responses + ) + + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_async_pages(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token="abc", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[], next_page_token="def", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[tensorboard_data.TimeSeriesDataPoint(),], + next_page_token="ghi", + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in ( + await client.export_tensorboard_time_series_data(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TensorboardServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.TensorboardServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TensorboardServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.TensorboardServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TensorboardServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.TensorboardServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = TensorboardServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.TensorboardServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.TensorboardServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.TensorboardServiceGrpcTransport, + transports.TensorboardServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = TensorboardServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.TensorboardServiceGrpcTransport,) + + +def test_tensorboard_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.TensorboardServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_tensorboard_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.TensorboardServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_tensorboard", + "get_tensorboard", + "update_tensorboard", + "list_tensorboards", + "delete_tensorboard", + "create_tensorboard_experiment", + "get_tensorboard_experiment", + "update_tensorboard_experiment", + "list_tensorboard_experiments", + "delete_tensorboard_experiment", + "create_tensorboard_run", + "get_tensorboard_run", + "update_tensorboard_run", + "list_tensorboard_runs", + "delete_tensorboard_run", + "create_tensorboard_time_series", + "get_tensorboard_time_series", + "update_tensorboard_time_series", + "list_tensorboard_time_series", + "delete_tensorboard_time_series", + "read_tensorboard_time_series_data", + "read_tensorboard_blob_data", + "write_tensorboard_run_data", + "export_tensorboard_time_series_data", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + 
transport.operations_client + + +def test_tensorboard_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.TensorboardServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_tensorboard_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.TensorboardServiceTransport() + adc.assert_called_once() + + +def test_tensorboard_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + TensorboardServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_tensorboard_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.TensorboardServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.TensorboardServiceGrpcTransport, + transports.TensorboardServiceGrpcAsyncIOTransport, + ], +) +def test_tensorboard_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_tensorboard_service_host_no_port(): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:443" + + +def test_tensorboard_service_host_with_port(): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + ) + assert client.transport._host == "aiplatform.googleapis.com:8000" + + +def test_tensorboard_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.TensorboardServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_tensorboard_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.TensorboardServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.TensorboardServiceGrpcTransport, + transports.TensorboardServiceGrpcAsyncIOTransport, + ], +) +def test_tensorboard_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when 
deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.TensorboardServiceGrpcTransport, + transports.TensorboardServiceGrpcAsyncIOTransport, + ], +) +def test_tensorboard_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_tensorboard_service_grpc_lro_client(): + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_tensorboard_service_grpc_lro_async_client(): + client = TensorboardServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_tensorboard_path(): + project = "squid" + location = "clam" + tensorboard = "whelk" + + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format( + project=project, location=location, tensorboard=tensorboard, + ) + actual = TensorboardServiceClient.tensorboard_path(project, location, tensorboard) + assert expected == actual + + +def test_parse_tensorboard_path(): + expected = { + "project": "octopus", + "location": "oyster", + "tensorboard": "nudibranch", + } + path = TensorboardServiceClient.tensorboard_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_tensorboard_path(path) + assert expected == actual + + +def test_tensorboard_experiment_path(): + project = "cuttlefish" + location = "mussel" + tensorboard = "winkle" + experiment = "nautilus" + + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format( + project=project, + location=location, + tensorboard=tensorboard, + experiment=experiment, + ) + actual = TensorboardServiceClient.tensorboard_experiment_path( + project, location, tensorboard, experiment + ) + assert expected == actual + + +def test_parse_tensorboard_experiment_path(): + expected = { + "project": "scallop", + "location": "abalone", + "tensorboard": "squid", + "experiment": "clam", + } + path = TensorboardServiceClient.tensorboard_experiment_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_tensorboard_experiment_path(path) + assert expected == actual + + +def test_tensorboard_run_path(): + project = "whelk" + location = "octopus" + tensorboard = "oyster" + experiment = "nudibranch" + run = "cuttlefish" + + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format( + project=project, + location=location, + tensorboard=tensorboard, + experiment=experiment, + run=run, + ) + actual = TensorboardServiceClient.tensorboard_run_path( + project, location, tensorboard, experiment, run + ) + assert expected == actual + + +def test_parse_tensorboard_run_path(): + expected = { + "project": "mussel", + "location": "winkle", + "tensorboard": "nautilus", + "experiment": "scallop", + "run": "abalone", + } + path = TensorboardServiceClient.tensorboard_run_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_tensorboard_run_path(path) + assert expected == actual + + +def test_tensorboard_time_series_path(): + project = "squid" + location = "clam" + tensorboard = "whelk" + experiment = "octopus" + run = "oyster" + time_series = "nudibranch" + + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format( + project=project, + location=location, + tensorboard=tensorboard, + experiment=experiment, + run=run, + time_series=time_series, + ) + actual = TensorboardServiceClient.tensorboard_time_series_path( + project, location, tensorboard, experiment, run, time_series + ) + assert expected == actual + + +def test_parse_tensorboard_time_series_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + "tensorboard": "winkle", + "experiment": "nautilus", + "run": "scallop", + "time_series": "abalone", + } + path = TensorboardServiceClient.tensorboard_time_series_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_tensorboard_time_series_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "squid" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = TensorboardServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TensorboardServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + + expected = "folders/{folder}".format(folder=folder,) + actual = TensorboardServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TensorboardServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + + expected = "organizations/{organization}".format(organization=organization,) + actual = TensorboardServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TensorboardServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + + expected = "projects/{project}".format(project=project,) + actual = TensorboardServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TensorboardServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = TensorboardServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TensorboardServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.TensorboardServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = TensorboardServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.TensorboardServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = TensorboardServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py index 5f1aec70ab..770c95794f 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py @@ -46,6 +46,7 @@ from google.cloud.aiplatform_v1beta1.types import vizier_service from google.longrunning import operations_pb2 from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore from google.protobuf import struct_pb2 as 
struct # type: ignore from google.protobuf import timestamp_pb2 as timestamp # type: ignore @@ -95,7 +96,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [VizierServiceClient, VizierServiceAsyncClient,], + "client_class", [VizierServiceClient, VizierServiceAsyncClient,] ) def test_vizier_service_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() @@ -112,7 +113,7 @@ def test_vizier_service_client_from_service_account_info(client_class): @pytest.mark.parametrize( - "client_class", [VizierServiceClient, VizierServiceAsyncClient,], + "client_class", [VizierServiceClient, VizierServiceAsyncClient,] ) def test_vizier_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -1819,6 +1820,8 @@ def test_create_trial( name="name_value", id="id_value", state=study.Trial.State.REQUESTED, + client_id="client_id_value", + infeasible_reason="infeasible_reason_value", custom_job="custom_job_value", ) @@ -1840,6 +1843,10 @@ def test_create_trial( assert response.state == study.Trial.State.REQUESTED + assert response.client_id == "client_id_value" + + assert response.infeasible_reason == "infeasible_reason_value" + assert response.custom_job == "custom_job_value" @@ -1883,6 +1890,8 @@ async def test_create_trial_async( name="name_value", id="id_value", state=study.Trial.State.REQUESTED, + client_id="client_id_value", + infeasible_reason="infeasible_reason_value", custom_job="custom_job_value", ) ) @@ -1904,6 +1913,10 @@ async def test_create_trial_async( assert response.state == study.Trial.State.REQUESTED + assert response.client_id == "client_id_value" + + assert response.infeasible_reason == "infeasible_reason_value" + assert response.custom_job == "custom_job_value" @@ -2056,6 +2069,8 @@ def test_get_trial( name="name_value", id="id_value", state=study.Trial.State.REQUESTED, + client_id="client_id_value", + infeasible_reason="infeasible_reason_value", 
custom_job="custom_job_value", ) @@ -2077,6 +2092,10 @@ def test_get_trial( assert response.state == study.Trial.State.REQUESTED + assert response.client_id == "client_id_value" + + assert response.infeasible_reason == "infeasible_reason_value" + assert response.custom_job == "custom_job_value" @@ -2120,6 +2139,8 @@ async def test_get_trial_async( name="name_value", id="id_value", state=study.Trial.State.REQUESTED, + client_id="client_id_value", + infeasible_reason="infeasible_reason_value", custom_job="custom_job_value", ) ) @@ -2141,6 +2162,10 @@ async def test_get_trial_async( assert response.state == study.Trial.State.REQUESTED + assert response.client_id == "client_id_value" + + assert response.infeasible_reason == "infeasible_reason_value" + assert response.custom_job == "custom_job_value" @@ -2606,6 +2631,8 @@ def test_add_trial_measurement( name="name_value", id="id_value", state=study.Trial.State.REQUESTED, + client_id="client_id_value", + infeasible_reason="infeasible_reason_value", custom_job="custom_job_value", ) @@ -2627,6 +2654,10 @@ def test_add_trial_measurement( assert response.state == study.Trial.State.REQUESTED + assert response.client_id == "client_id_value" + + assert response.infeasible_reason == "infeasible_reason_value" + assert response.custom_job == "custom_job_value" @@ -2675,6 +2706,8 @@ async def test_add_trial_measurement_async( name="name_value", id="id_value", state=study.Trial.State.REQUESTED, + client_id="client_id_value", + infeasible_reason="infeasible_reason_value", custom_job="custom_job_value", ) ) @@ -2696,6 +2729,10 @@ async def test_add_trial_measurement_async( assert response.state == study.Trial.State.REQUESTED + assert response.client_id == "client_id_value" + + assert response.infeasible_reason == "infeasible_reason_value" + assert response.custom_job == "custom_job_value" @@ -2775,6 +2812,8 @@ def test_complete_trial( name="name_value", id="id_value", state=study.Trial.State.REQUESTED, + client_id="client_id_value", + 
infeasible_reason="infeasible_reason_value", custom_job="custom_job_value", ) @@ -2796,6 +2835,10 @@ def test_complete_trial( assert response.state == study.Trial.State.REQUESTED + assert response.client_id == "client_id_value" + + assert response.infeasible_reason == "infeasible_reason_value" + assert response.custom_job == "custom_job_value" @@ -2839,6 +2882,8 @@ async def test_complete_trial_async( name="name_value", id="id_value", state=study.Trial.State.REQUESTED, + client_id="client_id_value", + infeasible_reason="infeasible_reason_value", custom_job="custom_job_value", ) ) @@ -2860,6 +2905,10 @@ async def test_complete_trial_async( assert response.state == study.Trial.State.REQUESTED + assert response.client_id == "client_id_value" + + assert response.infeasible_reason == "infeasible_reason_value" + assert response.custom_job == "custom_job_value" @@ -3278,6 +3327,8 @@ def test_stop_trial( name="name_value", id="id_value", state=study.Trial.State.REQUESTED, + client_id="client_id_value", + infeasible_reason="infeasible_reason_value", custom_job="custom_job_value", ) @@ -3299,6 +3350,10 @@ def test_stop_trial( assert response.state == study.Trial.State.REQUESTED + assert response.client_id == "client_id_value" + + assert response.infeasible_reason == "infeasible_reason_value" + assert response.custom_job == "custom_job_value" @@ -3342,6 +3397,8 @@ async def test_stop_trial_async( name="name_value", id="id_value", state=study.Trial.State.REQUESTED, + client_id="client_id_value", + infeasible_reason="infeasible_reason_value", custom_job="custom_job_value", ) ) @@ -3363,6 +3420,10 @@ async def test_stop_trial_async( assert response.state == study.Trial.State.REQUESTED + assert response.client_id == "client_id_value" + + assert response.infeasible_reason == "infeasible_reason_value" + assert response.custom_job == "custom_job_value"